diff --git "a/3437.jsonl" "b/3437.jsonl" new file mode 100644--- /dev/null +++ "b/3437.jsonl" @@ -0,0 +1,710 @@ +{"seq_id":"348457840","text":"# Make a class LatLon that can be passed parameters `lat` and `lon` to the\n# constructor\n\n# YOUR CODE HERE\nclass LatLon:\n def __init__(self, lat, lon):\n self.lat = lat\n self.lon = lon\n\nexp_latlon = LatLon(88, 90)\n\nprint(exp_latlon.lat)\n\n# Make a class Waypoint that can be passed parameters `name`, `lat`, and `lon` to the\n# constructor. It should inherit from LatLon. Look up the `super` method.\n\nclass Waypoint(LatLon):\n def __init__(self, lat, lon, name):\n super().__init__(name, lon)\n self.lat = lat\n self.lon = lon\n self.name = name\n def __repr__(self):\n return \"Waypoint: lat: %s lon: %s name: %s\" % (self.lat, self.lon, self.name)\n\nexp_waypoint = Waypoint(82, 90, 'place')\nprint('name:', exp_waypoint.lat)\n# Make a class Geocache that can be passed parameters `name`, `difficulty`,\n# `size`, `lat`, and `lon` to the constructor. What should it inherit from?\n\nclass Geocache(Waypoint):\n def __init__(self, lat, lon, name, difficulty, size):\n super().__init__(name, difficulty, size)\n self.difficulty = difficulty\n self.size = size\n self.lat = lat\n self.lon = lon\n self.name = name\n self.size = size\n def __repr__(self):\n return \"Geocache: lat: %s lon: %s difficulty: %s size: %s name: %s\" % (\n self.lat, \n self.lon, \n self.difficulty, \n self.size, \n self.name)\n\nexp_geocache = Geocache(12, 34, 'hard', 900, 'terrain')\n# print(exp_geocache.size)\n\n# Make a new waypoint and print it out: \"Catacombs\", 41.70505, -121.51521\n\nwaypoint = Waypoint(41.70505, -121.51521, 'Catacombs')\n\n# Without changing the following line, how can you make it print into something\n# more human-readable? 
Hint: Look up the `object.__str__` method\n# print(waypoint)\n\n# Make a new geocache \"Newberry Views\", diff 1.5, size 2, 44.052137, -121.41556\n\n\ngeocache = Geocache(44.052137, -121.41556, 'Newberry Views', 1.5, 2)\n# Print it--also make this print more nicely\nprint(geocache)\n","sub_path":"src/15_classes.py","file_name":"15_classes.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"378348363","text":"import os\nimport cv2\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom sklearn.model_selection import train_test_split\n\ninp_dm = 224\n\n\ndef get_labels_as_categorical():\n animals_for_categorical = pd.read_csv('../ImageExperiment/input/train.csv')\n animals_for_categorical.Animal = pd.Categorical(animals_for_categorical.Animal)\n animals_for_categorical['Animal'] = animals_for_categorical.Animal.cat.codes\n return animals_for_categorical['Animal'].values\n\n\ndef get_data_classes():\n animals = pd.read_csv('../ImageExperiment/input/train.csv')\n return animals['Animal'].unique()\n\n\ndef get_file_size():\n path, dirs, files = next(os.walk('../ImageExperiment/input/train/train'))\n return len(files)\n\n\ndef get_images(option, size_of_file):\n pictures = np.zeros((size_of_file, inp_dm, inp_dm, 3))\n for i in range(size_of_file):\n image = cv2.imread('../ImageExperiment/input/' + option + '/' + option + '/Img-{}.jpg'.format(i + 1))\n adjusted_image = cv2.resize(image, (inp_dm, inp_dm))\n pictures[i] = adjusted_image\n if i % 100 == 0:\n print(i)\n return pictures\n\n\ndef conv_net(x_dict, n_classes, dropout_rate, reuse, is_training):\n print(\"This is the Conv\")\n with tf.variable_scope('ConvNet', reuse=reuse):\n print(\"Still Working\")\n x_input = x_dict['images']\n x_input = tf.reshape(x_input, [-1, inp_dm, inp_dm, 3])\n conv1 = tf.layers.conv2d(x_input, 64, 5, activation=tf.nn.relu, strides=strides, padding=padding)\n conv1 = tf.layers.max_pooling2d(conv1, 2, 2)\n\n conv2 = tf.layers.conv2d(conv1, 64, 5, activation=tf.nn.relu, strides=strides, padding=padding)\n conv2 = tf.layers.max_pooling2d(conv2, 2, 2)\n\n conv3 = tf.layers.conv2d(conv2, 128, 5, activation=tf.nn.relu, strides=strides, padding=padding)\n conv3 = tf.layers.max_pooling2d(conv3, 2, 2)\n\n fc1 = tf.contrib.layers.flatten(conv3)\n fc1 = tf.layers.dense(fc1, 1024)\n fc1 = tf.layers.dropout(fc1, rate=dropout_rate, training=is_training)\n\n out = tf.layers.dense(fc1, n_classes)\n\n return out\n\n\ndef model_fn(features, labels, mode):\n print(\"Got into model\")\n logits_train = conv_net(features, num_classes, dropout, reuse=False, is_training=True)\n logits_test = conv_net(features, num_classes, dropout, reuse=True, is_training=False)\n\n pred_classes = tf.argmax(logits_test, axis=1)\n pred_probas = tf.nn.softmax(logits_test)\n print(\"Still Working\")\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(mode, predictions=pred_classes)\n\n loss_op = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=logits_train, labels=tf.cast(labels, dtype=tf.int32)))\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n train_op = optimizer.minimize(loss_op, global_step=tf.train.get_global_step())\n\n acc_op = tf.metrics.accuracy(labels=labels, predictions=pred_classes)\n\n estimated_specs = tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=pred_classes,\n loss=loss_op,\n train_op=train_op,\n eval_metric_ops={'accuracy': acc_op}\n )\n\n return 
estimated_specs\n\n\ny = get_labels_as_categorical()\n\nnum_input = get_file_size()\nx = get_images('train', num_input)\n\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=21)\nprint(x_train.size)\n\n# Training Parameters\nlearning_rate = 0.001\nnum_steps = 600\nbatch_size = 64\n\n\n# Network Parameters\nnum_classes = get_data_classes().size\nprint(num_classes)\nstrides = (3, 3)\npadding = 'same'\ndropout = 0.25\n\nmodel = tf.estimator.Estimator(model_fn)\n\ninput_fn = tf.estimator.inputs.numpy_input_fn(\n x={'images': x_train}, y=y_train, batch_size=batch_size, num_epochs=None, shuffle=True\n)\nmodel.train(input_fn, steps=num_steps)\n\n\ninput_fn = tf.estimator.inputs.numpy_input_fn(\n x={'images': x_test}, y=y_test, batch_size=batch_size, shuffle=False\n)\n\ne = model.evaluate(input_fn)\n\nprint(\"Testing Accuracy:\", e['accuracy'])\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"193031588","text":"import boto3\nimport os\nimport sys\n\n\ndef eb_deploy(env_name):\n return os.system(f\"eb deploy {env_name}\")\n\n # for Action debugging: print(f\"eb deploy {env_name}\")\n\n\nif __name__ == \"__main__\":\n client = boto3.client(\"elasticbeanstalk\")\n desc = client.describe_environments(ApplicationName=\"Iaso\")\n eb_envs = {x[\"EnvironmentName\"]: x for x in desc[\"Environments\"]}\n\n if sys.argv[1].lower() in [x.lower() for x in eb_envs.keys()]:\n exit(eb_deploy(sys.argv[1]))\n\n tag_envs = {}\n target_envs = []\n for env_name, env_details in eb_envs.items():\n if env_details[\"Status\"] not in (\"Ready\", \"Updating\"):\n print(\"Env {} ({}) is not ready for deploy, skipping\".format(env_name, env_details[\"Status\"]))\n continue\n raw_tags = client.list_tags_for_resource(ResourceArn=env_details[\"EnvironmentArn\"])\n\n tags = {x[\"Key\"]: x[\"Value\"] for x in raw_tags.get(\"ResourceTags\")}\n tag_envs[env_name] = tags\n if \"env\" in tags and tags[\"env\"].lower() == sys.argv[1].lower():\n target_envs.append(env_name)\n\n if len(target_envs) == 0:\n print(\"No target env found for\", sys.argv[1])\n else:\n for e in target_envs:\n print(\"Deploying to\", e, flush=True)\n r = eb_deploy(e)\n if r != 0:\n sys.exit(r)\n","sub_path":"scripts/eb_deploy.py","file_name":"eb_deploy.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"219599390","text":"'''\nDetermine whether an integer is a palindrome. An integer is a palindrome when it reads the same backward as forward.\n\nExample 1:\n\nInput: 121\nOutput: true\nExample 2:\n\nInput: -121\nOutput: false\nExplanation: From left to right, it reads -121. From right to left, it becomes 121-. Therefore it is not a palindrome.\nExample 3:\n\nInput: 10\nOutput: false\nExplanation: Reads 01 from right to left. 
Therefore it is not a palindrome.\nFollow up:\n\nCould you solve it without converting the integer to a string?\n\nSource: LeetCode (力扣)\nLink: https://leetcode-cn.com/problems/palindrome-number\nCopyright belongs to LeetCode China (领扣网络). For commercial reprints, please contact them for official authorization; for non-commercial reprints, please cite the source.\n'''\nfrom collections import deque\nclass Solution:\n\n\t# Three alternative solutions follow; they share one name, so only the\n\t# last definition of `isPalindrome` is the one that actually gets called.\n\tdef isPalindrome(self,x:int)->bool:\n\t\t\treturn str(x)==str(x)[::-1]\n\n\tdef isPalindrome(self,x:int)->bool:\n\t\tif x<0 or (x!=0 and x%10==0):\n\t\t\treturn False\n\t\tbit_deque = deque()\n\t\twhile x:\n\t\t\tbit_deque.append(x%10)\n\t\t\tx = x//10\n\t\twhile len(bit_deque)>1:\n\t\t\tif bit_deque.pop()!=bit_deque.popleft():\n\t\t\t\treturn False\n\t\treturn True\n\n\tdef isPalindrome(self,x):\n\t\t\n\t\tif x<0 or (x!=0 and x%10==0):\n\t\t\treturn False\n\t\tlast_part = 0 # reversed lower half of the digits\n\t\twhile x>last_part:\n\t\t\tlast_part = last_part*10+x%10\n\t\t\tx = x//10\n\t\treturn x==last_part or x==last_part//10\n\nif __name__ == '__main__':\n\ts = Solution()\n\tprint(s.isPalindrome(12321))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"简单题/9.py","file_name":"9.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"582810436","text":"import datetime\nimport shutil\n\nimport numpy as np\nimport json\n\nimport tensorflow as tf\n\nfrom tensorflow import keras\nfrom tensorflow.python.keras.callbacks import TensorBoard\nfrom tensorflow.python.keras.layers import Dense\nfrom tensorflow.python.keras.optimizers import SGD\n\nfrom db_manager import DBManager\nfrom trainingCallback import TrainingCallback\nimport tensorflow.keras.backend as K\n\nprint('tf.__version__ ', tf.__version__)\n\n\ndef _y_true(y_true, y_pred):\n return y_true\n\n\ndef _y_pred(y_true, y_pred):\n return y_pred\n\n\ndef f1_score(y_true, y_pred):\n # Count positive samples.\n c1 = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n c2 = K.sum(K.round(K.clip(y_pred, 0, 1)))\n c3 = K.sum(K.round(K.clip(y_true, 0, 1)))\n\n # If there are no true samples, fix the F1 score at 0.\n if c3 == 0:\n return 0\n\n # How many selected items are relevant?\n precision = c1 / c2\n\n # How many relevant items are selected?\n recall = c1 / c3\n\n # Calculate f1_score\n f1_score = 2 * (precision * recall) / (precision + recall)\n return f1_score\n\n\ndef recall(y_true, y_pred):\n # Count positive samples.\n c1 = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n c3 = K.sum(K.round(K.clip(y_true, 0, 1)))\n\n # If there are no true samples, fix the F1 score at 0.\n if c3 == 0:\n return 0\n\n # How many relevant items are selected?\n recall = c1 / c3\n\n return recall\n\n\nclass MLPModel:\n\n def __init__(self, train_new=True, batch_size=20, epochs=200, verbose=0):\n\n self._check_point = \"model_weight/cp.ckpt\"\n self._batch_size = batch_size\n self._epoches = epochs\n self._verbose = verbose\n\n sgd = SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True, clipvalue=0.5)\n\n self._model = keras.models.Sequential()\n self._model.add(Dense(3, input_dim=7, kernel_initializer='normal', activation='relu'))\n self._model.add(Dense(5, input_dim=3, kernel_initializer='normal', activation='relu'))\n self._model.add(Dense(3, input_dim=5, kernel_initializer='normal', activation='relu'))\n self._model.add(Dense(1, input_dim=3, kernel_initializer='normal', activation='sigmoid'))\n self._model.compile(loss='binary_crossentropy', optimizer=sgd,\n metrics=['accuracy', f1_score, recall, _y_true, _y_pred])\n\n self._training_callback = TrainingCallback(self._model, self._check_point, './logs')\n\n def 
__sample_generator(self, batch_size, dl_id, dl_name):\n dbManager = DBManager()\n dbManager.init(dl_id, dl_name)\n\n while True:\n try:\n x_batch = np.ones(shape=(batch_size, 7))\n y_batch = np.ones(shape=(batch_size, 1), dtype=np.int32)\n\n histories = list()\n labels = list()\n\n index = 0\n while True:\n dl_hist_id = np.random.randint(low=dbManager.train_min_index(), high=dbManager.train_max_index())\n history, label = dbManager.get_train_data(dl_hist_id)\n\n if history is None:\n continue\n\n histories.append(history)\n labels.append(label)\n\n index += 1\n\n if batch_size == index:\n break\n\n for idx, elem in enumerate(histories):\n x_batch[idx] = elem\n y_batch[idx] = labels[idx]\n\n # print('x_batch: ', x_batch)\n yield x_batch, y_batch\n except Exception as e:\n print(\"Exception: \", e)\n continue\n\n def train(self, epochs=100, eval=10, dl_id=0, dl_name=\"\"):\n # train_start_timestamp = datetime.datetime.now()\n try:\n batch_size = 100\n dbManager = DBManager()\n dbManager.init(dl_id, dl_name)\n\n steps_per_epoch = dbManager.get_steps(batch_size)\n\n print(\"batch size: \", batch_size)\n print(\"steps_per_epoch: \", steps_per_epoch)\n\n # x_test, y_test = dbManager.validation_data()\n\n self._training_callback.set_db_manager(dbManager, epochs)\n\n log_dir = \"./logs\"\n\n try:\n shutil.rmtree(log_dir)\n except OSError as e:\n print(\"Error: %s - %s.\" % (e.filename, e.strerror))\n\n tensorboard_callback = TensorBoard(log_dir=log_dir,\n histogram_freq=0,\n batch_size=batch_size,\n write_graph=True,\n write_grads=False,\n write_images=False, embeddings_freq=0, embeddings_layer_names=None,\n embeddings_metadata=None,\n embeddings_data=None, update_freq='epoch')\n\n generator = self.__sample_generator(batch_size, dl_id, dl_name)\n self._model.fit_generator(generator,\n steps_per_epoch=5,\n epochs=epochs,\n verbose=0,\n workers=1,\n use_multiprocessing=False,\n callbacks=[self._training_callback, tensorboard_callback])\n\n finally:\n dbManager.close()\n\n def serv(self, dl_id=0, dl_name=\"\"):\n dbManager = DBManager()\n dbManager.init(dl_id, dl_name)\n\n total = dbManager.get_total()\n\n if total == 0:\n return\n\n next_step = int(total / min(100, total))\n count = 0\n progress = 0\n dbManager.set_state_update(progress, 1)\n\n for num, index in enumerate(range(dbManager.serv_min_index(), dbManager.serv_max_index() + 1)):\n\n if num > count + next_step:\n count += next_step\n progress += max(int(100 / total), 1)\n dbManager.set_state_update(progress, 2)\n\n data = dbManager.get_serv_data(index)\n\n if data is None:\n continue\n\n result = self._model.predict(np.array([data]))\n classes = np.round(result[0])\n prob = result[0]\n print(f'[{index}]', 'data: ', data, ' result: ', classes, ' prob: ', prob)\n\n dbManager.update_serv_result(index, int(classes))\n\n dbManager.set_state_update(100, 3)\n\n @staticmethod\n def load_status():\n try:\n with open('status.json', 'r') as f:\n status = json.load(f)\n return status\n except:\n return None\n\n def summary(self):\n return self._model.summary()\n\n\nif __name__ == '__main__':\n m = MLPModel(train_new=False)\n\n # x_train, y_train, x_test, y_test = m.getData()\n # prediction = m.serv(x_test)\n #\n # for i in range(20):\n # print('prediction: ', prediction[i])\n # print('Y: ', y_test[i])\n\n # status = m.load_status()\n # print('epoch: ', status['epoch'])\n # print('loss', status['loss'])\n # print('acc', status['acc'])\n\n # print(m.summary())\n\n 
m.train()\n","sub_path":"mlp.py","file_name":"mlp.py","file_ext":"py","file_size_in_byte":7301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"500432428","text":"import numpy as np\n\nfrom chapter03.sectino3_2_activating_function import np_sigmoid\n\n\ndef layer(A, W, b):\n z = np.dot(A, W) + b\n return z\n\n\ndef identity_function(x):\n return x\n\n\ndef middle_layer(A, W, b):\n return np_sigmoid(layer(A, W, b))\n\n\ndef last_layer(A, W, b):\n return identity_function(layer(A, W, b))\n\n\ndef init_network():\n network = {'1': {}, '2': {}, '3': {}}\n network['1']['W'] = np.array([[0.1, 0.3, 0.5], [0.2, 0.4, 0.6]])\n network['1']['b'] = np.array([0.1, 0.2, 0.3])\n network['2']['W'] = np.array([[0.1, 0.4], [0.2, 0.5], [0.3, 0.6]])\n network['2']['b'] = np.array([0.1, 0.2])\n network['3']['W'] = np.array([[0.1, 0.3], [0.2, 0.4]])\n network['3']['b'] = np.array([0.1, 0.2])\n return network\n\n\ndef forward(network, x):\n _x = x\n keys = list(network.keys())\n keys.sort()\n for k in keys[:len(keys) - 1]:\n print(\"Z{}\".format(k))\n _x = middle_layer(_x, **network[k])\n print(_x)\n print(\"Y\")\n y = last_layer(_x, **network[str(keys.pop(-1))])\n print(y)\n return y\n\n\nif __name__ == '__main__':\n X = np.array([1.0, 0.5])\n W1 = np.array([[0.1, 0.3, 0.5], [0.2, 0.4, 0.6]])\n B1 = np.array([0.1, 0.2, 0.3])\n print(\"A1 : {}\".format(layer(X, W1, B1)))\n Z1 = np_sigmoid(layer(X, W1, B1))\n print(\"Z1 : {}\".format(Z1))\n\n W2 = np.array([[0.1, 0.4], [0.2, 0.5], [0.3, 0.6]])\n B2 = np.array([0.1, 0.2])\n print(\"A2 : {}\".format(layer(Z1, W2, B2)))\n Z2 = np_sigmoid(layer(Z1, W2, B2))\n print(\"Z2 : {}\".format(Z2))\n\n W3 = np.array([[0.1, 0.3], [0.2, 0.4]])\n B3 = np.array([0.1, 0.2])\n print(\"A3 : {}\".format(layer(Z2, W3, B3)))\n Y = identity_function(layer(Z2, W3, B3))\n print(\" Y : {}\".format(Y))\n\n network = init_network()\n forward(network, X)\n","sub_path":"chapter03/section3_4_three_layer_NN.py","file_name":"section3_4_three_layer_NN.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"474270844","text":"\"\"\" Classes and examples for searching for flights using SkyPicker. \"\"\"\nfrom __future__ import unicode_literals, absolute_import, generators, \\\n print_function\n\nimport requests\nfrom datetime import datetime\n\n\nclass SkyPickerApi(object):\n \"\"\" SkyPicker API. \"\"\"\n def __init__(self):\n \"\"\" Initializes the API object with URL attributes. \"\"\"\n self.base_url = 'https://api.skypicker.com/'\n self.path = ''\n self.param_str = ''\n\n @property\n def full_url(self):\n \"\"\" Returns the full URL for requesting the data. 
\"\"\"\n return '{}{}{}'.format(self.base_url, self.path, self.param_str)\n\n def get_request(self):\n \"\"\" Requests the API endpoint and returns the response \"\"\"\n headers = {'content-type': 'application/json'}\n resp = requests.get(self.full_url, headers=headers)\n return resp.json()\n\n def search_places(self, place_name, locale=None):\n \"\"\" Finds matching place API ids to use for searches.\n :param place_name: string of the place name to search for\n :kwarg locale: two letter lowercase locale string\n\n returns JSON response\n \"\"\"\n self.path = 'locations'\n self.param_str = '?term={}'.format(place_name)\n if locale:\n self.param_str += '&locale={}'.format(locale)\n return self.get_request()\n\n def search_flights(self, origin, start_date, end_date,\n num_passengers, offset):\n \"\"\" Searches for flights given a time range and origin.\n :param origin: string representing the ID or IATA\n :param start_date: datetime representing first possible travel date\n :param end_date: datetime representing last possible travel date\n :param num_passengers: integer\n\n returns JSON response\n \"\"\"\n start_date = datetime.strptime(start_date, '%Y-%m-%d')\n end_date = datetime.strptime(end_date, '%Y-%m-%d')\n self.path = 'flights'\n self.param_str = '?limit=200&flyFrom=' + \\\n '{}&dateFrom={}&dateTo={}&returnFrom={}&returnTo={}&passengers={}&curr=USD&typeFlight=roundtrip&offset={}'.format(\n origin,\n start_date.strftime('%d/%m/%Y'),\n start_date.strftime('%d/%m/%Y'),\n end_date.strftime('%d/%m/%Y'),\n end_date.strftime('%d/%m/%Y'), num_passengers, offset)\n resp = self.get_request()\n flights = {}\n for flight in resp.get('data'):\n departure_time = datetime.fromtimestamp(flight.get('dTime'))\n arrival_time = datetime.fromtimestamp(flight.get('aTime'))\n return_departure_time = datetime.fromtimestamp(flight.get('route')[-1]['dTime'])\n return_arrival_time = datetime.fromtimestamp(flight.get('route')[-1]['aTime'])\n\n flight_info = {\n 'cityTo': flight.get('cityTo').encode('ascii', 'ignore'),\n 'countryTo': flight.get('countryTo').get('name').encode('ascii', 'ignore'),\n 'cityFrom': flight.get('cityFrom').encode('ascii', 'ignore'),\n 'price': flight.get('price'),\n 'token': flight.get('booking_token'),\n 'departure_time': departure_time.strftime('%-I:%M %p %b %-d, %Y'),\n 'arrival_time': arrival_time.strftime('%-I:%M %p %b %-d, %Y'),\n 'return_departure_time': return_departure_time.strftime('%-I:%M %p %b %-d, %Y'),\n 'return_arrival_time': return_arrival_time.strftime('%-I:%M %p %b %-d, %Y'),\n 'flyTo': flight.get('flyTo'),\n 'lat': flight.get('route')[0]['latFrom'],\n 'lng': flight.get('route')[0]['lngFrom']\n }\n flights[flight.get('flyTo')] = flight_info\n\n origin_airport_code = flight.get('flyFrom').encode('ascii', 'ignore')\n if not flights.get(origin_airport_code, None):\n flights[origin_airport_code] = {\n 'cityTo': flight.get('cityFrom').encode('ascii', 'ignore'),\n 'countryTo': flight.get('countryFrom').get('name').encode('ascii', 'ignore'),\n 'cityFrom': flight.get('cityFrom').encode('ascii', 'ignore'),\n 'price': 0,\n 'token': None,\n 'departure_time': 'n/a',\n 'arrival_time': 'n/a',\n 'flyTo': origin_airport_code,\n 'lat': flight.get('route')[0]['latFrom'],\n 'lng': flight.get('route')[0]['lngFrom']\n }\n # Slows down a bit here, may be better just use Google's api\n # To convert place name to lat and lat when user clicks show activities button\n # Need to do performance tests\n #for r in flight.get('route'):\n # if r['cityTo'] == flight.get('cityTo'):\n # 
flight_info['dest_lat'] = r['latTo']\n # flight_info['dest_lng'] = r['lngTo']\n\n # Can maybe use some kind of call like this for the button\n #place = self.search_places(flight_info['cityTo'], locale='en')\n #flight_info['dest_lat'] = place.get('locations')[0]['location']['lat']\n #flight_info['dest_lng'] = place.get('locations')[0]['location']['lon']\n\n return flights\n","sub_path":"sky_picker.py","file_name":"sky_picker.py","file_ext":"py","file_size_in_byte":5321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"165981719","text":"# coding: utf-8\nfrom cgi import escape\nimport os\n\nfrom flask import Flask, request, redirect, url_for\nimport logbook\nfrom logbook import warn, debug\nfrom logbook.compat import RedirectLoggingHandler\n\nfrom pygift.conf import config\nfrom pygift import proxy, tools\n\n\nURL_PREFIX = config.data['url_prefix']\n\ncore_log_handler = logbook.RotatingFileHandler(os.path.join(config.data['log_dir'], 'app.log'))\ncore_log_handler.push_application()\n\n\napp = Flask(__name__)\napp.logger.addHandler(RedirectLoggingHandler())\n\n\n@app.route(URL_PREFIX + '/simple/<pkg>/')\ndef simple_pkg(pkg):\n u\"\"\"\n Annoyingly, pip hits this endpoint even when the package version is pinned.\n It would be nice to support it properly, so we could return the number of the latest version.\n But it is not yet clear how to do that quickly :(\n \"\"\"\n debug(\"pip asking for {!r} available versions\", pkg)\n if tools.can_be_proxied(pkg):\n return proxy.simple_pkg(pkg)\n return ''\n\n\n@app.route(URL_PREFIX + '/simple/<pkg>/<ver>/')\ndef simple_pkg_ver(pkg, ver):\n u\"\"\"\n pip asks where to download a package of a given version - we give it a link to ourselves\n \"\"\"\n from pygift.tools import version_validate\n\n if tools.can_be_proxied(pkg):\n return proxy.simple_pkg_ver(pkg, ver)\n\n # if the package is public, someone may try to install it by a regular version number\n if not version_validate(ver):\n if not tools.is_public(pkg):\n warn(\"unsupported version requested: {!r}=={!r}\", pkg, ver)\n return ''\n else:\n debug(\"unsupported version format, yet public pkg, simulating proxy: {!r}=={!r}\", pkg, ver)\n return proxy.json2simple_pkg_ver(pkg, ver)\n\n url = url_for('pkg_generate', pkg=pkg, ver=ver)\n return '<a href=\"{url}\">{pkg}-{ver}</a>'.format(url=escape(url), pkg=escape(pkg), ver=escape(ver))\n\n\n@app.route(URL_PREFIX + '/generate/<pkg>-<ver>.tar')\ndef pkg_generate(pkg, ver):\n from pygift.generator import _pkg_generate\n return _pkg_generate(pkg, ver)\n\n\n@app.route(URL_PREFIX + '/simple/<pkg>')\ndef simple_pkg_redirect(pkg):\n u\"\"\"\n pip always requests this URL without the trailing '/',\n while PyPI always redirects to the URL with one;\n we will not break the tradition, but the 301 drops a hint\n \"\"\"\n return redirect(request.url + '/', code=301)\n\n\n@app.route(URL_PREFIX + '/simple/<git>/<pkg>/<ver>')\ndef simple_pkg_ver_redirect(git, pkg, ver):\n u\"\"\"\n same as `simple_pkg_redirect`\n \"\"\"\n return redirect(request.url + '/', code=301)\n\n\nproxy.setup(app)\n\n\nif __name__ == '__main__':\n app.run(debug=True, host='0', port=11282)\n","sub_path":"pygift/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"265307360","text":"# learn whether a text expresses a stance toward the topic\ndata_path='/home/zju/buwenfeng/TensorFlowDemo/data/NLPCC2016_Stance_Detection_Test_Datasets'\nimport numpy as np\ntrain_or_test ='train'\nmaxlen=400\nsave_file_path ='/home/zju/buwenfeng/autosave'\nload_model_path 
='/home/zju/buwenfeng/autosave/stenceweibo1413/my_model.h5'\nbert_path='/home/zju/buwenfeng/bert/chinese_L-12_H-768_A-12/'\nfrom keras_bert import load_trained_model_from_checkpoint, Tokenizer\nimport codecs\nrelative_path=bert_path\nconfig_path =relative_path+ 'bert_config.json'\ncheckpoint_path = relative_path+ 'bert_model.ckpt'\ndict_path =relative_path+'vocab.txt'\ntoken_dict = {}\nwith codecs.open(dict_path, 'r', 'utf8') as reader:\n for line in reader:\n token = line.strip()\n token_dict[token] = len(token_dict)\n\ntokenizer = Tokenizer(token_dict)\ntrain_epochs =5\nmode = 'QA-B'\ntopics=['IphoneSE','春节放鞭炮','俄罗斯在叙利亚的反恐行动','开放二胎','深圳禁摩限电']\nLDA_topics=['iPhoneSE苹果手机用户5s6s外观屏幕小大英寸配置内存性能价格便宜市场发布系统功能设计',\n '春节放鞭炮新年过年春联燃放烟花爆竹环保减少环境污染空气倡议呼吁少放孩子安全传统节日文化习俗民俗',\n '俄罗斯在叙利亚的反恐行动土耳其美国中国沙特反恐恐怖分子is恐怖组织反对派伊斯兰中东北约总统大国普京毛子亲人飞机轰炸打击',\n '开放二胎政策国家人口问题女性怀孕结婚不要孩子妈妈工作家庭计划生育生二胎父母一起宝宝',\n '深圳禁摩限电电动车自行车交警执法快递外卖摩托车电摩三轮车整治非法拘留'\n ]\ntopicDict = dict((ch,ind)for ch,ind in zip(topics,LDA_topics))\ndef load_data(path):\n data=[]\n with open(path,'r') as f:\n for line in f:\n val =line.replace('\\n','')\n data.append(val)\n return data\n\nimport os\ntrain_topic = load_data(os.path.join(data_path,'train_topic.txt'))\ntest_topic =load_data(os.path.join(data_path,'test_topic.txt'))\ntrain_text =load_data(os.path.join(data_path,'train_text.txt'))\ntest_text = load_data(os.path.join(data_path,'test_text.txt'))\ntrain_stance = load_data(os.path.join(data_path,'train_stance.txt'))\ntest_stance = load_data(os.path.join(data_path,'test_stance.txt'))\nQAB_st=['我反对','我支持','我不知道']\n\ndef generate_data1(topic,texts,stances,mode = 'QA-B'):\n Yes = 0\n No = 1\n x1=[]\n x2=[]\n target=[]\n ids=[]\n for ix,x in enumerate(texts):\n top_ix =topicDict[topic[ix]]\n if mode == 'QA-B': # pair the text with each stance phrase: \"I oppose\" / \"I support\" / \"I don't know\"\n for senti_st in QAB_st:\n indices, segments = tokenizer.encode(first=x, second=senti_st + top_ix, max_len=maxlen)\n x1.append(indices)\n x2.append(segments)\n ids.append(ix)\n if stances[ix] == 'AGAINST':\n target.append(Yes)\n target.append(No)\n target.append(No)\n elif stances[ix] == 'FAVOR':\n target.append(No)\n target.append(Yes)\n target.append(No)\n elif stances[ix] == 'NONE':\n target.append(No)\n target.append(No)\n target.append(Yes)\n else:\n target.append(No)\n target.append(No)\n target.append(No)\n return x1,x2,target,ids\n\ntrain_x1,train_x2,train_target,_ = generate_data1(train_topic,train_text,train_stance)\ntest_x1,test_x2,test_target,test_id = generate_data1(test_topic,test_text,test_stance)\n\nfrom keras.layers import *\nfrom keras.models import Model\nfrom keras.optimizers import Adam\nbert_model = load_trained_model_from_checkpoint(config_path, checkpoint_path, seq_len=None)\nfor l in bert_model.layers:\n l.trainable = True\n\nx1_in = Input(shape=(maxlen,))\nx2_in = Input(shape=(maxlen,))\nx = bert_model([x1_in, x2_in])\nx = Lambda(lambda x: x[:, 0])(x)\np = Dense(1, activation='sigmoid')(x)\n\nmodel = Model([x1_in, x2_in], p)\nmodel.compile(\n loss='binary_crossentropy',\n optimizer=Adam(2e-5),\n metrics=['accuracy'])\n\nimport datetime,os\ntime_id = str(datetime.datetime.now().hour) + str(datetime.datetime.now().minute)\nimport keras\ndef run_model(train_or_test ='train',model_path =None):\n if train_or_test =='train':\n save_path = '/home/zju/buwenfeng/autosave/stenceweibo' + time_id + \"/\"\n if not os.path.exists(save_path):\n os.mkdir(save_path)\n callbacks_list = [\n keras.callbacks.EarlyStopping(\n monitor='accuracy',\n patience=1, # i.e. stop after two epochs without improvement\n ),\n keras.callbacks.ModelCheckpoint(\n filepath=save_path + 
'my_model.h5',\n monitor='val_loss', # only overwrite the saved model when val_loss improves\n save_best_only=True,\n )]\n history = model.fit([train_x1,train_x2], np.array(train_target),\n epochs=train_epochs,\n batch_size=4,\n validation_split=0.2,\n callbacks=callbacks_list)\n elif train_or_test =='test':\n model.load_weights(model_path)\n\nrun_model(train_or_test,load_model_path)\n\n\nresults = model.evaluate([test_x1,test_x2],np.array(test_target))\nprint(results)\n\npredictions = model.predict([test_x1,test_x2])\npred_out = [1 if p>0.5 else 0 for p in predictions]\nnp.savetxt(os.path.join(save_file_path,time_id+'pred.txt'), pred_out, fmt = \"%d\", delimiter = \",\")\nnp.savetxt(os.path.join(save_file_path,time_id+'act.txt'), test_target, fmt = \"%d\", delimiter = \",\")\nnp.savetxt(os.path.join(save_file_path,time_id+'test_id.txt'), test_id, fmt = \"%d\", delimiter = \",\")\n\nfpred=[]\nfact=[]\nimport numpy as np\nfor ix,x in enumerate(test_id):\n if(ix%3==0):\n pred_sum=np.sum(pred_out[ix:ix+3])\n if(pred_sum==2):\n pred_ix=[ip for ip,p in enumerate(pred_out[ix:ix+3]) if p==0][0]\n elif(pred_sum==3):\n pred_ix=2\n elif(pred_sum==1):\n pred_ix = [ip for ip,p in enumerate(pred_out[ix:ix+3]) if p==1][0]\n else:\n pred_ix = 2\n act_ix =[ip for ip,p in enumerate(test_target[ix:ix+3]) if p==0][0]\n fpred.append(pred_ix)\n fact.append(act_ix)\n\nfrom sklearn.metrics import classification_report\ntarget_names =['against','favor','none']\nprint(classification_report(fact,fpred,target_names=target_names))\n","sub_path":"process_data/QAA.py","file_name":"QAA.py","file_ext":"py","file_size_in_byte":6526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"298573584","text":"from URORobotLib import *\nimport math, numpy as np\n\nrob = RobotCakru(nama='Lolz1')\n\ndef putarKe(sudut):\n dat = rob.status()\n arah = dat['Arah']\n CW = (arah-sudut)%360\n CCW = (sudut-arah)%360\n if(CW < CCW):\n rob.putarCW(CW)\n else:\n rob.putarCCW(CCW)\n\ndef gerakKe(x,y):\n dat = rob.status()\n xPos, yPos = dat['Posisi']\n if(x != xPos):\n sudut = np.rad2deg(math.atan((y-yPos)/(x-xPos))) % 180\n if(x > xPos):\n if(y >= yPos):\n if(y == yPos):\n sudut = 0\n else:\n # x > xPos && y < yPos\n sudut += 180\n else:\n if(y >= yPos):\n if(y == yPos):\n sudut = 180\n else:\n # x > xPos && y < yPos\n sudut += 180\n else:\n if(y >= yPos):\n if(y > yPos):\n sudut = 90\n else:\n sudut = 270\n if(xPos != x or yPos != y):\n putarKe(sudut)\n maju = math.sqrt((x-xPos)**2+(y-yPos)**2)\n rob.maju(maju)\n \n\n\nfor i in range(10):\n print('To the bottom-left section')\n gerakKe(100,100)\n print('To the bottom-right section')\n gerakKe(400,100)\n print('To the top-right section')\n gerakKe(400,400)\n print('To the top-left section')\n gerakKe(100,400)\n \n","sub_path":"robot1.py","file_name":"robot1.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"601304856","text":"import frappe\nfrom frappe import _\nfrom frappe.utils import getdate, cint, cstr, random_string, now_datetime, data as convert_string_to_datetime\nfrom frappe.client import get_list\nimport pandas as pd\nimport json, base64, ast, itertools, datetime\nfrom frappe.client import attach_file\nfrom one_fm.one_fm.page.roster.roster import get_post_view as _get_post_view # , get_roster_view as _get_roster_view\nfrom one_fm.api.v1.utils import response\nfrom one_fm.one_fm.page.roster.employee_map import CreateMap, PostMap\n\n\n# @frappe.whitelist()\n# def 
get_roster_view(start_date, end_date, all=1, assigned=0, scheduled=0, project=None, site=None, shift=None, department=None, operations_role=None):\n# \ttry:\n# \t\treturn _get_roster_view(start_date, end_date, all, assigned, scheduled, project, site, shift, department, operations_role)\n# \texcept Exception as e:\n# \t\treturn frappe.utils.response.report_error(e.http_status_code)\n\n@frappe.whitelist()\ndef get_roster_view(date, shift=None, site=None, project=None, department=None):\n try:\n filters = {\n 'date': date\n }\n if project:\n filters.update({'project': project})\n if site:\n filters.update({'site': site})\n if shift:\n filters.update({'shift': shift})\n if department:\n filters.update({'department': department})\n\n fields = [\"employee\", \"employee_name\", \"date\", \"operations_role\", \"post_abbrv\", \"employee_availability\",\n \"shift\"]\n user, user_roles, user_employee = get_current_user_details()\n print(user_roles)\n if \"Operations Manager\" in user_roles or \"Projects Manager\" in user_roles:\n projects = get_assigned_projects(user_employee.name)\n assigned_projects = []\n for assigned_project in projects:\n assigned_projects.append(assigned_project.name)\n\n filters.update({\"project\": (\"in\", assigned_projects)})\n roster = frappe.get_all(\"Employee Schedule\", filters, fields)\n master_data = []\n for key, group in itertools.groupby(roster, key=lambda x: (x['post_abbrv'], x['operations_role'])):\n employees = list(group)\n master_data.append({\"employees\": employees, \"post\": key[0], \"count\": len(employees)})\n return master_data\n\n elif \"Site Supervisor\" in user_roles:\n sites = get_assigned_sites(user_employee.name, project)\n assigned_sites = []\n for assigned_site in sites:\n assigned_sites.append(assigned_site.name)\n filters.update({\"site\": (\"in\", assigned_sites)})\n roster = frappe.get_all(\"Employee Schedule\", filters, fields)\n print(roster)\n master_data = []\n for key, group in itertools.groupby(roster, key=lambda x: (x['post_abbrv'], x['operations_role'])):\n employees = list(group)\n master_data.append({\"employees\": employees, \"post\": key[0], \"count\": len(employees)})\n return master_data\n\n elif \"Shift Supervisor\" in user_roles:\n shifts = get_assigned_shifts(user_employee.name, site)\n assigned_shifts = []\n for assigned_shift in shifts:\n assigned_shifts.append(assigned_shift.name)\n filters.update({\"shift\": (\"in\", assigned_shifts)})\n\n roster = frappe.get_all(\"Employee Schedule\", filters, fields)\n master_data = []\n for key, group in itertools.groupby(roster, key=lambda x: (x['post_abbrv'], x['operations_role'])):\n employees = list(group)\n master_data.append({\"employees\": employees, \"post\": key[0], \"count\": len(employees)})\n return master_data\n except Exception as e:\n return frappe.utils.response.report_error(e.http_status_code)\n\n\n@frappe.whitelist()\ndef get_weekly_staff_roster(start_date, end_date):\n try:\n user, user_roles, user_employee = get_current_user_details()\n roster = frappe.db.sql(\"\"\"\n\t\t\tSELECT shift, employee, date, employee_availability, operations_role\n\t\t\tFROM `tabEmployee Schedule`\n\t\t\tWHERE employee=\"{emp}\"\n\t\t\tAND date BETWEEN date(\"{start_date}\") AND date(\"{end_date}\")\n\t\t\"\"\".format(emp=user_employee.name, start_date=start_date, end_date=end_date), as_dict=1)\n print(roster)\n return roster\n except Exception as e:\n return frappe.utils.response.report_error(e.http_status_code)\n\n\n@frappe.whitelist()\ndef get_current_user_details():\n user = 
frappe.session.user\n user_roles = frappe.get_roles(user)\n user_employee = frappe.get_value(\"Employee\", {\"user_id\": user},\n [\"name\", \"employee_id\", \"employee_name\", \"image\", \"enrolled\", \"designation\"],\n as_dict=1)\n return user, user_roles, user_employee\n\n\n@frappe.whitelist()\ndef get_post_view(date, shift=None, site=None, project=None, department=None):\n try:\n filters = {\n 'date': date\n }\n if project:\n filters.update({'project': project})\n if site:\n filters.update({'site': site})\n if shift:\n filters.update({'shift': shift})\n if department:\n filters.update({'department': department})\n\n fields = [\"post\", \"post_status\", \"date\", \"operations_role\", \"shift\"]\n user, user_roles, user_employee = get_current_user_details()\n\n if \"Operations Manager\" in user_roles or \"Projects Manager\" in user_roles:\n projects = get_assigned_projects(user_employee.name)\n assigned_projects = []\n for assigned_project in projects:\n assigned_projects.append(assigned_project.name)\n\n filters.update({\"project\": (\"in\", assigned_projects)})\n roster = frappe.get_all(\"Post Schedule\", filters, fields)\n print(roster)\n for post in roster:\n post.update({\"count\": 1})\n return roster\n\n elif \"Site Supervisor\" in user_roles:\n sites = get_assigned_sites(user_employee.name, project)\n assigned_sites = []\n for assigned_site in sites:\n assigned_sites.append(assigned_site.name)\n filters.update({\"site\": (\"in\", assigned_sites)})\n roster = frappe.get_all(\"Post Schedule\", filters, fields)\n print(roster)\n\n master_data = []\n # for key, group in itertools.groupby(roster, key=lambda x: (x['post_abbrv'], x['operations_role'])):\n # \temployees = list(group)\n # \tmaster_data.append({\"employees\": employees, \"post\": key[0], \"count\": len(employees)})\n\n for post in roster:\n post.update({\"count\": 1})\n return roster\n\n elif \"Shift Supervisor\" in user_roles:\n shifts = get_assigned_shifts(user_employee.name, site)\n assigned_shifts = []\n for assigned_shift in shifts:\n assigned_shifts.append(assigned_shift.name)\n filters.update({\"shift\": (\"in\", assigned_shifts)})\n\n roster = frappe.get_all(\"Post Schedule\", filters, fields)\n print(roster)\n # for key, group in itertools.groupby(roster, key=lambda x: (x['post_abbrv'], x['operations_role'])):\n # \temployees = list(group)\n # \tmaster_data.append({\"employees\": employees, \"post\": key[0], \"count\": len(employees)})\n for post in roster:\n post.update({\"count\": 1})\n return roster\n\n except Exception as e:\n return frappe.utils.response.report_error(e.http_status_code)\n\n\n@frappe.whitelist()\ndef edit_post(post, post_status, start_date, end_date, paid=0, never_end=0, repeat=0, repeat_freq=None):\n try:\n if never_end:\n project = frappe.get_value(\"Operations Post\", post, [\"project\"])\n end_date = frappe.get_value(\"Contracts\", {\"project\": project}, [\"end_date\"])\n if repeat:\n if repeat_freq == \"Daily\":\n for date in pd.date_range(start=start_date, end=end_date):\n create_edit_post(cstr(date.date()), post, post_status, paid)\n elif repeat_freq == \"Weekly\":\n day = getdate(start_date).strftime('%A')\n for date in pd.date_range(start=start_date, end=end_date):\n if date.date().strftime('%A') == day:\n create_edit_post(cstr(date.date()), post, post_status, paid)\n elif repeat_freq == \"Monthly\":\n for date in month_range(start_date, end_date):\n # print(cstr(date.date()))\n if end_date >= cstr(date.date()):\n print(cstr(date.date()))\n create_edit_post(cstr(date.date()), post, 
post_status, paid)\n else:\n for date in pd.date_range(start=start_date, end=end_date):\n create_edit_post(cstr(date.date()), post, post_status, paid)\n frappe.db.commit()\n return True\n\n except Exception as e:\n return frappe.utils.response.report_error(e.http_status_code)\n\n\ndef create_edit_post(date, post, post_status, paid):\n if frappe.db.exists(\"Post Schedule\", {\"date\": date, \"post\": post}):\n post_schedule = frappe.get_doc(\"Post Schedule\", {\"date\": date, \"post\": post})\n else:\n post_schedule = frappe.new_doc(\"Post Schedule\")\n post_schedule.post = post\n post_schedule.date = date\n post_schedule.post_status = post_status\n if cint(paid):\n post_schedule.paid = 1\n else:\n post_schedule.paid = 0\n post_schedule.save(ignore_permissions=True)\n\n\n@frappe.whitelist()\ndef day_off(employee, date, repeat=0, repeat_freq=None, repeat_till=None):\n try:\n if repeat:\n if repeat_freq == \"Daily\":\n for date in pd.date_range(start=date, end=repeat_till):\n create_day_off(employee, cstr(date.date()))\n elif repeat_freq == \"Weekly\":\n day = getdate(date).strftime('%A')\n for date in pd.date_range(start=date, end=repeat_till):\n if date.date().strftime('%A') == day:\n create_day_off(employee, cstr(date.date()))\n elif repeat_freq == \"Monthly\":\n for date in month_range(date, repeat_till):\n # print(cstr(date.date()))\n if repeat_till >= cstr(date.date()):\n print(cstr(date.date()))\n create_day_off(employee, cstr(date.date()))\n else:\n create_day_off(employee, date)\n frappe.db.commit()\n return True\n except Exception as e:\n return frappe.utils.response.report_error(e.http_status_code)\n\n\ndef month_range(start, end):\n rng = pd.date_range(start=pd.Timestamp(start) - pd.offsets.MonthBegin(),\n end=end,\n freq='MS')\n ret = (rng + pd.offsets.Day(pd.Timestamp(start).day - 1)).to_series()\n ret.loc[ret.dt.month > rng.month] -= pd.offsets.MonthEnd(1)\n return pd.DatetimeIndex(ret)\n\n\ndef create_day_off(employee, date):\n if frappe.db.exists(\"Employee Schedule\", {\"employee\": employee, \"date\": date}):\n roster = frappe.get_doc(\"Employee Schedule\", {\"employee\": employee, \"date\": date})\n roster.shift = None\n roster.shift_type = None\n roster.operations_role = None\n roster.post_abbrv = None\n roster.site = None\n roster.project = None\n else:\n roster = frappe.new_doc(\"Employee Schedule\")\n roster.employee = employee\n roster.date = date\n roster.employee_availability = \"Day Off\"\n roster.save(ignore_permissions=True)\n\n\n@frappe.whitelist()\ndef get_unassigned_project_employees(project, date, limit_start=None, limit_page_length=20):\n try:\n # Todo add date range\n return frappe.get_list(\"Employee\", fields=[\"name\", \"employee_name\"], filters={\"project\": project},\n order_by=\"name asc\",\n limit_start=limit_start, limit_page_length=limit_page_length, ignore_permissions=True)\n except Exception as e:\n return frappe.utils.response.report_error(e.http_status_code)\n\n\n@frappe.whitelist()\ndef get_unscheduled_employees(date, shift):\n try:\n employees = frappe.db.sql(\"\"\"\n\t\t\tselect name as employee_id, employee_name \n\t\t\tfrom `tabEmployee`\n\t\t\twhere \n\t\t\t\tshift=\"{shift}\"\n\t\t\tand name not in(select employee from `tabEmployee Schedule` where date=\"{date}\" and shift=\"{shift}\")\n\t\t\"\"\".format(date=date, shift=shift), as_dict=1)\n return employees\n except Exception as e:\n return frappe.utils.response.report_error(e.http_status_code)\n\n\n@frappe.whitelist()\ndef get_assigned_employees(shift, date, limit_start=None, 
limit_page_length=20):\n try:\n # Todo add date range\n return frappe.get_list(\"Employee Schedule\", fields=[\"employee\", \"employee_name\", \"operations_role\"],\n filters={\"shift\": shift, \"date\": date}, order_by=\"employee_name asc\",\n limit_start=limit_start, limit_page_length=limit_page_length, ignore_permissions=True)\n except Exception as e:\n return frappe.utils.response.report_error(e.http_status_code)\n\n\n@frappe.whitelist()\ndef get_assigned_projects(employee_id):\n try:\n user, user_roles, user_employee = get_current_user_details()\n if \"Operations Manager\" in user_roles:\n return frappe.get_list(\"Project\", {\"project_type\": \"External\"}, limit_page_length=9999, order_by=\"name asc\")\n\n if \"Projects Manager\" in user_roles:\n return frappe.get_list(\"Project\", {\"account_manager\": employee_id, \"project_type\": \"External\"},\n limit_page_length=9999, order_by=\"name asc\")\n return []\n except Exception as e:\n return frappe.utils.response.report_error(e.http_status_code)\n\n\n@frappe.whitelist()\ndef get_assigned_sites(employee_id, project=None):\n try:\n user, user_roles, user_employee = get_current_user_details()\n filters = {}\n if project:\n filters.update({\"project\": project})\n if project is None and (\n \"Operations Manager\" in user_roles or \"Projects Manager\" in user_roles or \"Site Supervisor\" in user_roles):\n return frappe.get_list(\"Operations Site\", limit_page_length=9999, order_by=\"name asc\")\n\n elif \"Operations Manager\" in user_roles or \"Projects Manager\" in user_roles:\n return frappe.get_list(\"Operations Site\", filters, limit_page_length=9999, order_by=\"name asc\")\n\n elif \"Site Supervisor\" in user_roles:\n filters.update({\"account_supervisor\": employee_id})\n return frappe.get_list(\"Operations Site\", filters, limit_page_length=9999, order_by=\"name asc\")\n return []\n\n except Exception as e:\n return frappe.utils.response.report_error(e.http_status_code)\n\n\n@frappe.whitelist()\ndef get_assigned_shifts(employee_id, project=None, site=None):\n try:\n user, user_roles, user_employee = get_current_user_details()\n filters = {}\n if project:\n filters.update({\"project\": project})\n if site:\n filters.update({\"site\": site})\n\n if site is None and (\n \"Operations Manager\" in user_roles or \"Projects Manager\" in user_roles or \"Site Supervisor\" in user_roles):\n return frappe.get_list(\"Operations Shift\", limit_page_length=9999, order_by=\"name asc\")\n\n elif \"Operations Manager\" in user_roles or \"Projects Manager\" in user_roles or \"Site Supervisor\" in user_roles:\n return frappe.get_list(\"Operations Shift\", filters, limit_page_length=9999, order_by=\"name asc\")\n\n elif \"Shift Supervisor\" in user_roles:\n filters.update({\"supervisor\": employee_id})\n return frappe.get_list(\"Operations Shift\", filters, limit_page_length=9999, order_by=\"name asc\")\n return []\n\n except Exception as e:\n return frappe.utils.response.report_error(e.http_status_code)\n\n\n@frappe.whitelist()\ndef get_departments():\n try:\n return frappe.get_list(\"Department\", {\"is_group\": 0}, limit_page_length=9999, order_by=\"name asc\")\n\n except Exception as e:\n return frappe.utils.response.report_error(e.http_status_code)\n\n\n@frappe.whitelist()\ndef get_operations_posts(shift=None):\n try:\n user, user_roles, user_employee = get_current_user_details()\n\n if shift is None and (\n \"Operations Manager\" in user_roles or \"Projects Manager\" in user_roles or \"Site Supervisor\" in user_roles):\n return 
frappe.get_list(\"Operations Post\", limit_page_length=9999, order_by=\"name asc\")\n\n if \"Operations Manager\" in user_roles or \"Projects Manager\" in user_roles or \"Site Supervisor\" in user_roles or \"Shift Supervisor\" in user_roles:\n return frappe.get_list(\"Operations Post\", {\"site_shift\": shift}, \"post_template\", limit_page_length=9999,\n order_by=\"name asc\")\n\n return []\n except Exception as e:\n return frappe.utils.response.report_error(e.http_status_code)\n\n\n@frappe.whitelist()\ndef get_designations():\n try:\n return frappe.db.get_list(\"Designation\", limit_page_length=9999, order_by=\"name asc\")\n except Exception as e:\n return frappe.utils.response.report_error(e.http_status_code)\n\n\n@frappe.whitelist()\ndef get_post_details(post_name):\n try:\n return frappe.get_value(\"Operations Post\", post_name, \"*\")\n except Exception as e:\n return frappe.utils.response.report_error(e.http_status_code)\n\n\n@frappe.whitelist()\ndef unschedule_staff(employee, start_date, end_date=None, never_end=0):\n if not employee:\n return response(\"Bad request\", 400, None, \"Employee ID must be entered\")\n\n if not isinstance(employee, str):\n return response(\"Bad request\", 400, None, \"Employee ID has to be a string\")\n\n check = frappe.get_doc(\"Employee\", employee)\n if not check:\n return response(\"Bad Request\", 400, None, \"Employee Does Not Exist\")\n\n try:\n if never_end:\n rosters = frappe.get_all(\"Employee Schedule\", {\"employee\": employee, \"date\": ('>=', start_date)})\n for roster in rosters:\n frappe.delete_doc(\"Employee Schedule\", roster.name, ignore_permissions=True)\n frappe.db.commit()\n return response(\"Success\", 200, None, f\"Employee Unscheduled Successfully !\")\n else:\n if not end_date:\n end_date = start_date\n for date in pd.date_range(start=start_date, end=end_date):\n if frappe.db.exists(\"Employee Schedule\", {\"employee\": employee, \"date\": cstr(date.date())}):\n roster = frappe.get_doc(\"Employee Schedule\", {\"employee\": employee, \"date\": cstr(date.date())})\n frappe.delete_doc(\"Employee Schedule\", roster.name, ignore_permissions=True)\n frappe.db.commit()\n return response(\"Success\", 200, None, \"Employee Unscheduled Successfully !\")\n except Exception as e:\n return response(\"Internal Server Error !\", 500, None, e)\n\n@frappe.whitelist()\ndef schedule_staff(employee, shift, operations_role, start_date, end_date=None, never=0, repeat_days=[], day_off=[],\n overtime=0):\n # For each day in the start_date end_date iterable, create an employee schedule for either working or day off\n # depending on if the day falls on a repeat day or day off\n # Key : Monday = 0 ,Sunday = 6\n\n try:\n if type(start_date) == str:\n start_date = convert_string_to_datetime.get_datetime(start_date).date()\n\n if not end_date and not never:\n end_date = start_date\n\n obj_shift = frappe.get_doc(\"Operations Shift\", shift)\n if not obj_shift:\n return response(\"Bad Request\", 400, None, \"Shift Does Not Exist\")\n\n obj_operations_role = frappe.get_doc(\"Operations Role\", operations_role)\n if not obj_operations_role:\n return response(\"Bad Request\", 400, None, \"Operations Role Does Not Exist!\")\n\n if never:\n end_date = cstr(getdate().year) + \"-12-31\"\n\n for date in pd.date_range(start=start_date, end=end_date):\n if getdate(cstr(date.date())).weekday() in repeat_days or getdate(cstr(date.date())).weekday() in day_off:\n if frappe.db.exists(\"Employee Schedule\", {\"employee\": employee, \"date\": cstr(date.date())}):\n roster = 
frappe.get_doc(\"Employee Schedule\", {\"employee\": employee, \"date\": cstr(date.date())})\n else:\n roster = frappe.new_doc(\"Employee Schedule\")\n roster.employee = employee\n roster.date = cstr(date.date())\n if getdate(cstr(date.date())).weekday() in day_off:\n roster.employee_availability = \"Day Off\"\n else:\n roster.employee_availability = \"Working\"\n roster.shift = obj_shift.name\n roster.operations_role = obj_operations_role.name\n if overtime:\n roster.roster_type = \"Over-Time\"\n roster.save(ignore_permissions=True)\n frappe.db.commit()\n return response(\"Success\", 201, None, f\"Employee Scheduled successfully \")\n except Exception as e:\n return response(\"Internal Server Error !\", 500, None, e)\n\n\n@frappe.whitelist()\ndef schedule_leave(employee, leave_type, start_date, end_date):\n try:\n for date in pd.date_range(start=start_date, end=end_date):\n print(employee, date.date())\n if frappe.db.exists(\"Employee Schedule\", {\"employee\": employee, \"date\": cstr(date.date())}):\n roster = frappe.get_doc(\"Employee Schedule\", {\"employee\": employee, \"date\": cstr(date.date())})\n roster.shift = None\n roster.shift_type = None\n roster.project = None\n roster.site = None\n else:\n roster = frappe.new_doc(\"Employee Schedule\")\n roster.employee = employee\n roster.date = cstr(date.date())\n roster.employee_availability = leave_type\n roster.save(ignore_permissions=True)\n return True\n except Exception as e:\n print(e)\n return frappe.utils.response.report_error(e.http_status_code)\n\n\n@frappe.whitelist()\ndef post_handover(post, date, initiated_by, handover_to, docs_check, equipment_check, items_check, docs_comment=None,\n equipment_comment=None, items_comment=None, attachments=[]):\n try:\n handover = frappe.new_doc(\"Post Handover\")\n handover.post = post\n handover.date = date\n handover.initiated_by = initiated_by\n handover.handover_to = handover_to\n handover.docs_check = docs_check\n handover.equipment_check = equipment_check\n handover.items_check = items_check\n handover.docs_comment = docs_comment\n handover.equipment_comment = equipment_comment\n handover.items_comment = items_comment\n handover.save()\n\n for attachment in ast.literal_eval(attachments):\n attach_file(filename=random_string(6) + \".jpg\", filedata=base64.b64decode(attachment),\n doctype=handover.doctype, docname=handover.name)\n\n return True\n except Exception as e:\n return frappe.utils.response.report_error(e.http_status_code)\n\n\n@frappe.whitelist()\ndef get_handover_posts(shift=None):\n try:\n filters = {\"handover\": 1}\n if shift:\n filters.update({\"site_shift\": shift})\n return frappe.get_list(\"Operations Post\", filters)\n except Exception as e:\n return frappe.utils.response.report_error(e.http_status_code)\n\n\n@frappe.whitelist()\ndef get_current_shift(employee):\n \"\"\"This function is to return employee's current Shift,\n\tbased on Shift Assignment. 
\n\tArgs:\n\t\temployee (str): Employee's ERP ID\n\tReturns:\n\t\tstring: Operation Shift of the assigned shift if it exists.\n\t\"\"\"\n try:\n # fetch datetime\n current_datetime = now_datetime()\n\n # fetch the last shift assignment\n shift = frappe.get_last_doc(\"Shift Assignment\", filters={\"employee\": employee}, order_by=\"creation desc\")\n\n if shift:\n before_time, after_time = frappe.get_value(\"Shift Type\", shift.shift_type,\n [\"begin_check_in_before_shift_start_time\",\n \"allow_check_out_after_shift_end_time\"])\n\n if shift.start_datetime and shift.end_datetime:\n # include early entry and late exit time\n start_time = shift.start_datetime - datetime.timedelta(minutes=before_time)\n end_time = shift.end_datetime + datetime.timedelta(minutes=after_time)\n\n # Check if current time is within the shift start and end time.\n if start_time <= current_datetime <= end_time:\n return shift\n except Exception as e:\n print(frappe.get_traceback())\n return frappe.utils.response.report_error(e.http_status_code)\n\n\n@frappe.whitelist()\ndef get_report_comments(report_name):\n try:\n comments = frappe.get_list(\"Comment\", {\"reference_doctype\": \"Shift Report\", \"reference_name\": report_name,\n \"comment_type\": \"Comment\"}, \"*\")\n return comments\n except Exception as e:\n return frappe.utils.response.report_error(e.http_status_code)\n\n\n@frappe.whitelist()\ndef get_filtered_values(start_date, end_date, project=None, site=None, shift=None, operations_role=None,\n limit_start=None, page_length=None):\n \"\"\"\n Dynamically return a list of employee data based on selected filters\n \"\"\"\n try:\n query_dict, post_filters, master_data = {}, {}, {}\n str_filters = f'es.date between \"{start_date}\" and \"{end_date}\"'\n query_dict['shift_working'] = 1\n query_dict['status'] = 'Active'\n if not limit_start:\n limit_start = 0\n if not page_length:\n page_length = 15\n if project:\n query_dict['project'] = project\n post_filters['project'] = project\n if site:\n query_dict['site'] = site\n post_filters['site'] = site\n if shift:\n query_dict['shift'] = shift\n post_filters['shift'] = shift\n if operations_role:\n query_dict['operations_role'] = operations_role\n post_filters['operations_role'] = operations_role\n str_filters += ' and es.operations_role = \"{}\"'.format(operations_role)\n post_filters.update({'date': ['between', (start_date, end_date)], 'post_status': 'Planned'})\n employees = frappe.get_all(\"Employee\", query_dict, [\"employee\", \"employee_name\"], order_by=\"employee_name asc\",\n start=limit_start, page_length=page_length)\n post_filters.pop('operations_role', None)\n if employees:\n basic_ot_roster = CreateMap(start=start_date, end=end_date, employees=employees, filters=str_filters,\n isOt=None).formated_rs\n the_set = set()\n filtered_dict = {}\n for emp_name, schedule in basic_ot_roster.items():\n emp_op_role = frappe.db.get_value(\"Shift Assignment\", {\"employee_name\": emp_name}, \"operations_role\")\n if emp_op_role:\n role_name = frappe.db.get_value(\"Operations Role\", emp_op_role, \"post_name\")\n the_set.add(role_name)\n if not role_name in filtered_dict.keys():\n filtered_dict.update({role_name: [dict({emp_name: schedule})]})\n else:\n update = dict({emp_name: schedule})\n filtered_dict[role_name].append(update)\n else:\n if not \"other\" in filtered_dict.keys():\n filtered_dict.update({\"other\": [dict({emp_name: schedule})]})\n else:\n update = dict({emp_name: schedule})\n filtered_dict[\"other\"].append(update)\n master_data.update({'employees_data': 
filtered_dict})\n return response(\"Successful\", 200, master_data)\n return response(\"No Employee fits the query\", 200)\n\n\n except:\n frappe.log_error(frappe.get_traceback(), 'Roster API Error')\n\n\n@frappe.whitelist()\ndef get_opening_values():\n \"\"\"\n\t\tGet the opening values the roster page\n\t\"\"\"\n projects = frappe.db.sql(\"SELECT name from `tabProject` where status = 'Open' \", as_dict=1)\n shifts = frappe.db.sql(\"SELECT name from `tabOperations Shift` \", as_dict=1)\n sites = frappe.db.sql(\"SELECT name from `tabOperations Site` \", as_dict=1)\n employees = frappe.db.sql(\n \"SELECT name, employee_name, designation, department, cell_number from `tabEmployee` where status = 'Active' LIMIT 15 \",\n as_dict=1)\n data = {'projects': projects, 'shifts': shifts, 'sites': sites, 'employees': employees}\n return response(\"Success\", 200, data)\n\n\n@frappe.whitelist()\ndef get_opening_post_values():\n \"\"\"\n\t\tGet the opening values for the roster post page\n\t\"\"\"\n posts = frappe.db.sql(\"SELECT post_name from `tabOperations Post`\", as_dict=1)\n return response(\"Success\", 200, posts)\n","sub_path":"one_fm/api/v2/roster.py","file_name":"roster.py","file_ext":"py","file_size_in_byte":29687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"227128864","text":"# -*- codeing = utf-8 -*-\n# @Time : 2021/11/12 15:11\n# @Author : zhy\n# @File : CSDN_news_spider.py\n# @Software: PyCharm\n\nimport requests\nfrom urllib.parse import urlencode\nimport re\nfrom CSDN_db import DataManager\nimport parsel\nfrom selenium import webdriver\nfrom pyquery import PyQuery as pq\n\nheaders ={\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36'\n}\n\ndef get_url_json(base_url):\n params = {\n 'page': 0,\n 'pageSize': 25\n }\n url = base_url + urlencode(params)\n #print(url)\n try:\n response = requests.get(url, headers=headers)\n if response.status_code == 200:\n return response.json()\n except requests.ConnectionError as e:\n print('Error', e.args)\n\ndef get_urls():\n base_url = 'https://blog.csdn.net/phoenix/web/blog/hot-rank?'\n json = get_url_json(base_url)\n url_lis = []\n if json:\n items = json.get('data')\n i = 1\n for item in items:\n item = item.get('articleDetailUrl')\n url_lis.append(item)\n i += 1\n if i == 11:\n break\n #print(len(url_lis))\n # print(url_lis)\n return url_lis\n\ndef get_page(url):\n html = requests.get(url,headers = headers)\n return html.text\n\ndef parse_page(html):\n title = re.compile(r' var articleTitle = \"(.*?)\";',re.S)\n title_ans = re.search(title,html).group(1)\n # print(title_ans)\n content = re.compile(r'
\n    content = re.compile(r'<div id=\"content_views\".*?>(.*?)</div>', re.S)  # CSDN renders article bodies inside the content_views div
\n    content_ans = re.search(content, html).group(1)\n    # print(content_ans)\n    return title_ans, content_ans\n\ndef spider():\n    url_lis = get_urls()\n    db_manager = DataManager('dbase')\n    db_manager.clear_table()\n    for url in url_lis:\n        data = {}\n        html = get_page(url)\n        title, content = parse_page(html)\n        data['title'] = title\n        data['content'] = content\n        db_manager.trans_to_news_table(data)\n    db_manager.close_db()\n\n\nif __name__ == '__main__':\n    spider()","sub_path":"CSDN_news_spider.py","file_name":"CSDN_news_spider.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} {"seq_id":"49053351","text":"import pandas as pd\nimport numpy as np\nimport tensorflow as tf\nimport os\nimport glob\nimport joblib\nimport tqdm\n\nknown_labels = [1, 3, 4, 5, 6, 7, 8,\n                10, 13, 20, 28, 29, 32,\n                33, 35, 36, 37, 38]\n\nnum_classes = len(known_labels)\ntest_list = ['2019']\n\nlog_dir = \"/home/dinardo/phd/pubs/stormseeker/logs/20210329/140313\"\npath = \"/projects/stormseeker/data/\"\nfiles = glob.glob(os.path.join(path, '*.csv'))\n\nprint(\"========= Label Mapping =========\")\nlabel_map = {}\nfor i, l in enumerate(known_labels):\n    label_map[i] = l\n\nmodel = tf.keras.models.load_model(log_dir)\nmodel.trainable = False\n\nx_scaler = joblib.load(os.path.join(log_dir, 'scaler.pkl'))\n\n\ndef evaluate(records, set_type, model, x_scaler, label_map):\n    print(f\"Evaluate on {set_type} set\")\n    for df_test, fn in records:\n        df_test = df_test.loc[df_test['label'].isin(known_labels)]\n        df_test = df_test.fillna(0.0)  # copy instead of in-place fill, avoiding chained-assignment warnings on the .loc view\n\n        y_test = df_test.pop('label')\n        y_test = np.expand_dims(y_test, -1)\n\n        coords_idx = [df_test.pop('j'), df_test.pop('i'),\n                      df_test.pop('lat'), df_test.pop('lon')]\n\n        df_coord = pd.concat(coords_idx, axis=1)\n\n        for i, l in enumerate(known_labels):\n            y_test[y_test == l] = i\n\n        x_test = x_scaler.transform(df_test)\n        x_test = np.expand_dims(x_test, axis=1)\n\n        test_loss, test_acc = model.evaluate(x_test, y_test)\n        print(f\"Test {fn} > loss: {test_loss} - acc: {test_acc}\")\n\n        y_logits = model.predict(x_test)\n        y_pred = np.argmax(y_logits, axis=-1)\n\n        y_pred_map = []\n        true_labels = []\n        for pred, yt in zip(y_pred, y_test):\n            y_pred_map.append(label_map[int(pred)])\n            true_labels.append(label_map[int(yt)])\n\n        df_coord['predictions'] = y_pred_map\n        df_coord['true_labels'] = true_labels\n        filename = fn.split('/')[-1]\n        df_coord.to_csv(os.path.join(log_dir, f\"{set_type}_out_{filename}\"), index=False)\n\n\nprint(\"Loading test set\")\ndf_tests = [(pd.read_csv(file), file) for file in tqdm.tqdm(files)\n            if any(t in file for t in test_list)]\nevaluate(df_tests, 'test', model, x_scaler, label_map)\n","sub_path":"src/stage3/test_simple.py","file_name":"test_simple.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} {"seq_id":"465480352","text":"# mind that this program thinks that you were born at 00:00:00 \r\n# this app is based on the Gregorian calendar\r\nimport datetime\r\n\r\nsal_tavalod = int(input(\"Enter your birth year: \"))\r\nmah_tavalod = int(input(\"Enter your birth month: \"))\r\nroz_tavalod = int(input(\"Enter your birth day: \"))\r\n\r\ndate_and_time = datetime.datetime.now()\r\nyear = date_and_time.year\r\nmonth = date_and_time.month\r\nday = date_and_time.day\r\nhour = date_and_time.hour\r\nminute = date_and_time.minute\r\nsecond = date_and_time.second\r\n
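# A minimal sketch of the label-to-index remapping pattern the evaluation
# script above uses, on a toy array. Values are illustrative.
import numpy as np

known_labels = [1, 3, 4]
y = np.array([3, 1, 4, 3])
for i, l in enumerate(known_labels):
    y[y == l] = i   # rewrite raw labels as contiguous class ids
print(y)            # [1 0 2 1]
# The in-place rewrite is safe here because the labels are sorted
# ascending, so an already-assigned index never collides with a later
# raw label.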
if mah_tavalod <= month:\r\n    age_year = year - sal_tavalod\r\n    age_month = month - mah_tavalod\r\nelse:\r\n    age_year = (year - sal_tavalod) - 1\r\n    age_month = 12 + (month - mah_tavalod)\r\n# the day borrow must run in both branches, not only when the month borrows\r\nif roz_tavalod <= day:\r\n    age_day = day - roz_tavalod\r\nelse:\r\n    age_day = 30 + (day - roz_tavalod)\r\n    age_month = age_month - 1\r\nage_hour = hour\r\nage_minute = minute\r\nage_second = second\r\nprint(age_year, 'years', age_month, 'months', age_day, 'days',\r\n      age_hour, 'hours', age_minute, 'minutes', age_second, 'seconds')\r\n","sub_path":"sen.py","file_name":"sen.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} {"seq_id":"363973136","text":"\"\"\"student_managemnt_system URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n    https://docs.djangoproject.com/en/3.0/topics/http/urls/\nExamples:\nFunction views\n    1. Add an import: from my_app import views\n    2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n    1. Add an import: from other_app.views import Home\n    2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n    1. Import the include() function: from django.urls import include, path\n    2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom student_managemnt_system import settings\nfrom student_mnagament_app import views, HodViews\nfrom django.conf.urls.static import static\n\nurlpatterns = [\n    path('demo/', views.showDempPage),\n    path('', views.showLoginPage),\n    path('dologin', views.doLogin),\n    path('get_user_details', views.getUserDetails),\n    path('logout_user', views.logoutUser),\n    path('admin_home', HodViews.admin_home),\n    path('add_staff', HodViews.add_staff),\n    path('add_staff_save', HodViews.add_staff_save),\n    path('add_course_save', HodViews.add_course_save),\n    path('add_course', HodViews.add_course),\n    path('add_student', HodViews.add_student),\n    path('add_student_save', HodViews.add_student_save),\n    path('add_subject', HodViews.add_subject),\n    path('add_subject_save', HodViews.add_subject_save),\n    path('admin/', admin.site.urls),\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n","sub_path":"student_managemnt_system/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} {"seq_id":"329421251","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 17 23:11:04 2018\n\n@author: eshikasaxena\n\"\"\"\n\nimport pandas as pd\nimport csv\n\ninpath = '..\\\\HemaCam-Data\\\\Segmented_Cells\\\\Cell_Properties'\noutpath = '..\\\\HemaCam-Data\\\\Segmented_Cells\\\\Cell_Properties'\ncsvInpfile = inpath + '\\\\Sickle3_data_results.csv'\nhtmlOutfile = outpath + '\\\\pandas.html'\n\ndf = pd.read_csv(csvInpfile)\nfile = open(htmlOutfile, 'w')\nfile.write(df.to_html(justify='center', escape=False))\nfile.close()\n\nsummaryOutfile = outpath + '\\\\summary.html'\nsummaryOutCsv = outpath + '\\\\summary.csv'\n\ncount = df['Result'].count()\ndata = df['Result'].value_counts(dropna=True)\nregular = data.get('Regular')\nsickle = data.get('Sickle')\ntotal_data = [['Regular Cells', str(regular)], ['Sickle Cells', str(sickle)], ['Total Cells', str(count)]]\n\n
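# A minimal sketch of a borrow-based age calculation equivalent to the
# fixed logic above, using only datetime.date (no third-party dateutil).
# The function name and the 30-day-month approximation mirror the record.
from datetime import date

def age_parts(born, today):
    years = today.year - born.year
    months = today.month - born.month
    days = today.day - born.day
    if days < 0:
        months -= 1
        days += 30   # same 30-day-month approximation as the record
    if months < 0:
        years -= 1
        months += 12
    return years, months, days

print(age_parts(date(1990, 11, 25), date(2021, 3, 10)))  # (30, 3, 15)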
with open(summaryOutCsv, 'w', newline='') as f:\n    writer = csv.writer(f)\n    writer.writerow(['Summary', 'Count'])\n    for x in total_data:\n        writer.writerow(x)\n\ndf = pd.read_csv(summaryOutCsv)\nfile = open(summaryOutfile, 'w')\nfile.write(df.to_html(justify='center', escape=False))\nfile.close()\n","sub_path":"HemaCam Code/PandasTest.py","file_name":"PandasTest.py","file_ext":"py","file_size_in_byte":3298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} {"seq_id":"410763437","text":"from django.shortcuts import render\n\nfrom .models import _model_to_tuple, _keyvalue_to_str, GeneralInfo, Contact\n\n\nKNOWN_CONTACT_TYPES = ['Email', 'Jabber', 'Skype']\n\n\ndef _is_multiline(text):\n    return '\\n' in text\n\n\ndef _split_contacts(contacts):\n    known_contacts = []\n    other_contacts = []\n    for type, id in contacts:\n        if type in KNOWN_CONTACT_TYPES:\n            known_contacts.append((type, id))\n        else:\n            other_contacts.append((type, id))\n    return (known_contacts, other_contacts)\n\n\ndef _contacts_to_string(contacts):\n    return '\\n'.join([\n        _keyvalue_to_str(type, id) for type, id in contacts\n    ])\n\n\ndef _add_multiline_flag(tuples_list):\n    return [(key, {\n        'value': entry,\n        'multiline': _is_multiline(entry),\n    }) for key, entry in tuples_list]\n\n\ndef contact_page(request):\n    general_info = map(_model_to_tuple, GeneralInfo.objects.all())\n    contacts = map(_model_to_tuple, Contact.objects.all())\n\n    general_info = _add_multiline_flag(general_info)\n\n    known_contacts, other_contacts = _split_contacts(contacts)\n
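# A minimal sketch of the value_counts-based summary built above, run on
# an inline frame instead of the HemaCam CSVs so it is self-contained.
import pandas as pd

df = pd.DataFrame({'Result': ['Regular', 'Sickle', 'Regular', 'Regular']})
counts = df['Result'].value_counts()
summary = pd.DataFrame({
    'Summary': ['Regular Cells', 'Sickle Cells', 'Total Cells'],
    'Count': [counts.get('Regular', 0), counts.get('Sickle', 0), len(df)],
})
print(summary.to_html(index=False, justify='center'))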
    known_contacts = _add_multiline_flag(known_contacts)\n    other_contacts = _contacts_to_string(other_contacts)\n\n    return render(request, 'contact_page/contact_page.html', {\n        \"general_info\": general_info,\n        \"contacts\": known_contacts,\n        \"other_contacts\": other_contacts,\n    })\n","sub_path":"apps/contact_page/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} {"seq_id":"449627650","text":"import os\nimport re\nimport json\n\n\n#from optionstrader.database import Database\nfrom optionstrader.webservice import Webservice\n\nclass Parser:\n\n    def __init__(self):\n        '''\n        This class is only used for when text needs parsing\n        '''\n        return\n\n    def add_stock_to_database(self, symbol):\n        # DEPRECATED FUNCTION\n        # This was used to download a list of ticker symbols and current prices.\n        # Deprecated since we are moving forward with tradier, not yahoo, as the\n        # source for the data.\n\n        webservice = Webservice()\n        parsed_json = json.loads(webservice.get_from_yahoo(symbol))\n        dictionary = parsed_json['query']['results']['quote']\n\n        database = Database()\n\n        column_string = \"\"\n        for i in dictionary.items():\n            column_string = column_string + \", \" + i[0]\n\n        column_string = \"(\" + column_string[2:] + \")\"\n\n        value_string = \"\"\n        for i in dictionary.items():\n            value_string = value_string + \"\\\", \\\"\" + str(i[1])\n        value_string = \"(\\\"\" + value_string[4:] + \"\\\")\"\n\n        # Because for some reason there are two \"Symbol\" fields\n        column_string = column_string.replace(\"Symbol\", \"Symbol_2\")\n        column_string = column_string.replace(\", Change, \", \", Change_percent, \")\n        column_string = column_string.replace(\", Name, \", \", Name_of_company, \")\n        column_string = column_string.replace(\", Open, \", \", Open_price, \")\n\n        database.insert_values_into_table(column_string, value_string)\n        #print(column_string)\n        #print(value_string)\n\n        database.close_connection()\n        print(\"%s Added to database.\" % symbol)\n\n    def extract_symbols(self):\n        \"\"\" This method sanitizes ticker symbols read from disk.\n\n        Returns:\n            list() of list() of sanitized ticker symbols.
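# A minimal sketch of the known/other contact split above: it is a plain
# partition-by-predicate, shown here on illustrative (type, id) pairs.
KNOWN = {'Email', 'Jabber', 'Skype'}

def partition(pairs):
    known, other = [], []
    for type_, id_ in pairs:
        (known if type_ in KNOWN else other).append((type_, id_))
    return known, other

print(partition([('Email', 'a@b.c'), ('ICQ', '12345')]))
# ([('Email', 'a@b.c')], [('ICQ', '12345')])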
\n\n        Example:\n            [['T', 'AT&T Inc.'], ['V', 'Visa Inc']]\n        \"\"\"\n        file_path = os.path.join(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')), 'nasdaq_symbols.txt')\n        with open(file_path, 'r') as file_data:\n            file_string = file_data.read()\n        regex = re.compile(r\"^[A-Za-z]+\\|.+?\\|\", re.MULTILINE)\n        symbols_and_names = re.findall(regex, file_string)\n\n        cleaned_symbols_and_names = []\n        for i in symbols_and_names:\n            cleaned_symbols_and_names.append(i.split(\"|\")[:-1])\n\n        return cleaned_symbols_and_names\n        #for i in output:\n        #    self.add_stock_to_database(i[:-1])\n","sub_path":"optionstrader/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":2486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} {"seq_id":"172577665","text":"import plotly.express as px\nimport plotly\n\nwind = px.data.wind()\nfig = px.line_polar(wind, r=\"frequency\",\n                    theta=\"direction\", color=\"strength\", line_close=True,\n                    color_discrete_sequence=px.colors.sequential.Plasma,\n                    template=\"plotly_white\",)\n\nfig.update_layout(\n    title=\"Wind Strength\"\n)\n\nfig.update_traces(fill='toself')\n\nplotly.offline.plot(fig, filename='polarplot_px.html')\nfig.show()\n","sub_path":"plotly/polarchart/polar_line.py","file_name":"polar_line.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} {"seq_id":"640212601","text":"'''\nIn this example we solve the Poisson equation over an L-shaped domain\nwith fixed boundary conditions. We use the RBF-FD method. The RBF-FD\nmethod is preferable over the spectral RBF method because it is\nscalable and does not require the user to specify a shape parameter\n(assuming that we use odd order polyharmonic splines to generate the\nweights).\n'''\nimport numpy as np\nfrom rbf.fd import weight_matrix\nfrom rbf.basis import phs3\nfrom rbf.geometry import contains\nfrom rbf.nodes import menodes\nimport matplotlib.pyplot as plt\nfrom scipy.sparse import vstack\nfrom scipy.sparse.linalg import spsolve\nfrom scipy.interpolate import LinearNDInterpolator\n\n# Define the problem domain with line segments.\nvert = np.array([[0.0,0.0],[2.0,0.0],[2.0,1.0],\n                 [1.0,1.0],[1.0,2.0],[0.0,2.0]])\nsmp = np.array([[0,1],[1,2],[2,3],[3,4],[4,5],[5,0]])\n\nN = 500 # total number of nodes.\n\nn = 20 # stencil size. Increasing this will generally improve accuracy\n       # at the expense of computation time.\n\nbasis = phs3 # radial basis function used to compute the weights. Odd\n             # order polyharmonic splines (e.g., phs3) have always\n             # performed well for me and they do not require the user\n             # to tune a shape parameter. Use higher order\n             # polyharmonic splines for higher order PDEs.\n\norder = 2 # Order of the added polynomials. This should be at least as\n          # large as the order of the PDE being solved (2 in this\n          # case). 
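# A minimal sketch of the pipe-delimited symbol extraction from the parser
# record, run over an inline sample of the nasdaq_symbols.txt line format.
import re

sample = "AAPL|Apple Inc.|extra\nT|AT&T Inc.|extra\n"
pairs = [m.split('|')[:-1] for m in re.findall(r'^[A-Za-z]+\|.+?\|', sample, re.MULTILINE)]
print(pairs)  # [['AAPL', 'Apple Inc.'], ['T', 'AT&T Inc.']]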
Larger values may improve accuracy\n\n# generate nodes\nnodes,smpid = menodes(N,vert,smp) \nedge_idx, = (smpid>=0).nonzero() \ninterior_idx, = (smpid==-1).nonzero() \n# create \"left hand side\" matrix\nA_int = weight_matrix(nodes[interior_idx],nodes,diffs=[[2,0],[0,2]],\n n=n,basis=basis,order=order)\nA_edg = weight_matrix(nodes[edge_idx],nodes,diffs=[0,0]) \nA = vstack((A_int,A_edg))\n# create \"right hand side\" vector\nd_int = -1*np.ones_like(interior_idx)\nd_edg = np.zeros_like(edge_idx)\nd = np.hstack((d_int,d_edg))\n# find the solution at the nodes\nu_soln = spsolve(A,d) \n# interpolate the solution on a grid\nxg,yg = np.meshgrid(np.linspace(-0.05,2.05,400),np.linspace(-0.05,2.05,400))\npoints = np.array([xg.flatten(),yg.flatten()]).T \nu_itp = LinearNDInterpolator(nodes,u_soln)(points)\n# mask points outside of the domain\nu_itp[~contains(points,vert,smp)] = np.nan \nug = u_itp.reshape((400,400)) # fold back into a grid\n# make a contour plot of the solution\nfig,ax = plt.subplots()\np = ax.contourf(xg,yg,ug,np.linspace(0.0,0.16,9),cmap='viridis')\nax.plot(nodes[:,0],nodes[:,1],'ko',markersize=4)\nfor s in smp:\n ax.plot(vert[s,0],vert[s,1],'k-',lw=2)\n\nax.set_aspect('equal')\nfig.colorbar(p,ax=ax)\nfig.tight_layout()\nplt.savefig('../figures/fd.i.png')\nplt.show()\n\n","sub_path":"prj_src/nn_examples/rbf_examples/RBF-master/docs/scripts/fd.i.py","file_name":"fd.i.py","file_ext":"py","file_size_in_byte":2787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"234752360","text":"from collections import deque, defaultdict\nfrom itertools import product\nimport re\n\nimport enum\nimport os\n\n\n############################################################################\n#\n# Reading from files.\n\ndef read_lines(file_name):\n return list(filter(None, open(file_name).read().split('\\n')))\n\ndef read_paragraphs(file_name):\n return list(filter(None, open(file_name).read().split('\\n\\n')))\n\ndef parse_items(items, parser):\n return list(map(parser, items))\n\n\n############################################################################\n#\n# Grids: 2D square and hex grids.\n\nclass grid:\n \"\"\"\n 2D grid. 
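# A minimal sketch of the assemble-then-solve pattern the fd.i script
# uses, on a tiny 1-D Poisson system so the scipy calls can be checked in
# isolation (assumes scipy is installed; the 3x3 operator is illustrative).
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.linalg import spsolve

# Second-difference operator on 3 interior nodes, Dirichlet ends folded in.
A = csr_matrix(np.array([[-2., 1., 0.], [1., -2., 1.], [0., 1., -2.]]))
d = -np.ones(3)
print(spsolve(A, d))   # [1.5 2.  1.5]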
Supports Game of Life-style evolution by producing a new grid\n    from an is_alive function, which takes whether the old grid\n    position was alive and the number of immediate neighbours.\n    \"\"\"\n    def __init__(self, dir_to_moves):\n        self._grid = set()\n        self._dir_to_moves = dir_to_moves\n\n    @staticmethod\n    def hex_grid():\n        return grid(( (-1, 0), (0, -1), (1, -1), (1, 0), (0, 1), (-1, 1), ))\n\n    @staticmethod\n    def square_grid():\n        return grid(( (1, 0), (0, -1), (-1, 0), (0, 1), ))\n\n    def clone(self):\n        other = grid(self._dir_to_moves)\n        other._grid = set(self._grid)\n        return other\n\n    def add(self, pos):\n        self._grid.add(pos)\n\n    @staticmethod\n    def move_pos(pos: tuple, dir: tuple):\n        return (pos[0] + dir[0], pos[1] + dir[1])\n\n    def count_around(self, pos: tuple):\n        count = 0\n        for dir in self._dir_to_moves:\n            if grid.move_pos(pos, dir) in self._grid:\n                count += 1\n        return count\n\n    def evolve(self, is_alive):\n        new_grid = grid(self._dir_to_moves)\n        for pos in self._grid:\n            count = self.count_around(pos)\n            if is_alive(True, count):\n                new_grid.add(pos)\n            for dir in self._dir_to_moves:\n                new_pos = grid.move_pos(pos, dir)\n                if new_pos not in self._grid:\n                    count = self.count_around(new_pos)\n                    if is_alive(False, count):\n                        new_grid.add(new_pos)\n        return new_grid\n\n\n############################################################################\n#\n# URL stuff.\n\nimport requests\nimport json\n\n_requests_session = None\ndef get_session() -> requests.Session:\n    global _requests_session\n    if not _requests_session:\n        _requests_session = requests.Session()\n    return _requests_session\n\n\nclass Method(enum.Enum):\n    GET = 1\n    POST = 2\n    PUT = 3\n    DELETE = 4\n\n\n_base_url = None\n\ndef set_base_url(url: str):\n    global _base_url\n    _base_url = url\n\ndef build_full_url(url: str) -> str:\n    global _base_url\n    if not _base_url:\n        return url\n    return _base_url + url\n\n\ndef get_url(\n        url: str = '',\n        method: Method = Method.GET,\n        params: dict = None,\n        in_headers: dict = None,\n        in_json: dict = None) -> tuple:\n    \"\"\"\n    Call a REST API.\n    The url will be combined with the global base URL, if set.\n    Return a tuple of (JSON, headers, content, status code).\n    \"\"\"\n    full_url = build_full_url(url)\n\n    session = get_session()\n\n    methods = {\n        Method.GET: session.get,\n        Method.POST: session.post,\n        Method.PUT: session.put,\n        Method.DELETE: session.delete,\n    }\n    meth = methods[method]\n\n    with meth(full_url, params=params, headers=in_headers, json=in_json, stream=False) as response:\n        try:\n            received_json = response.json()\n        except ValueError:\n            received_json = {}\n        received_content = response.content\n        received_headers = response.headers\n        received_code = response.status_code\n\n    return (received_json, received_headers, received_content, received_code)\n\ndef get_url_json(url: str = '', **kwargs):\n    return get_url(url, **kwargs)[0]\n\ndef get_url_headers(url: str = '', **kwargs):\n    return get_url(url, **kwargs)[1]\n\ndef get_url_content(url: str = '', **kwargs):\n    return get_url(url, **kwargs)[2]\n\ndef get_url_status(url: str = '', **kwargs):\n    return get_url(url, **kwargs)[3]\n\n","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":4093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} {"seq_id":"242089979","text":"from typing import List\n\nfrom adventofcode2019.utils.abstract import FileReaderSolution\n\n\nclass Day08:\n    frames: List[str]\n    width = 0\n    height = 0\n\n    def load_image(self, image_data: str, width: int, height: int):\n
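# A minimal sketch of the set-based evolve step above, specialised to
# Conway's rules (8 neighbours) so the neighbour counting can be checked;
# the record's grid class itself takes its direction list as a parameter.
def conway(alive, count):
    return count == 3 or (alive and count == 2)

DIRS = [(dx, dy) for dx in (-1, 0, 1) for dy in (-1, 0, 1) if (dx, dy) != (0, 0)]

def step(live):
    candidates = live | {(x + dx, y + dy) for x, y in live for dx, dy in DIRS}
    def neighbours(c):
        return sum((c[0] + dx, c[1] + dy) in live for dx, dy in DIRS)
    return {c for c in candidates if conway(c in live, neighbours(c))}

blinker = {(0, 0), (0, 1), (0, 2)}
print(sorted(step(blinker)))  # [(-1, 1), (0, 1), (1, 1)]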
        \"\"\" Load an image from \`image_data\`, with \`width\` and \`height\` dimensions \"\"\"\n        frame_size = width * height\n        self.frames = [\n            image_data[i : i + frame_size]\n            for i in range(0, len(image_data), frame_size)\n        ]\n\n        self.width = width\n        self.height = height\n\n    def count_number_per_frame(self, number: int, frame: int) -> int:\n        \"\"\" Count the number of times \`number\` is in frame \`frame\` \"\"\"\n        needle = str(number)\n        return self.frames[frame].count(needle)\n\n    def layer_with_fewest_digit(self, number_to_find: int):\n        \"\"\" Find the layer with the fewest occurrences of \`number_to_find\` \"\"\"\n        found = {}\n        for x in range(0, len(self.frames)):\n            found[x] = self.count_number_per_frame(number=number_to_find, frame=x)\n        return min(found, key=found.get)  # type: ignore\n\n    @staticmethod\n    def _compute_per_pixel(pixels: List[int]) -> int:\n        for pixel in pixels:\n            if pixel != 2:\n                return pixel\n        return 0\n\n    def get_computed_image(self):\n        \"\"\" Compute the image \"\"\"\n        resulting_image = []\n        for x in range(0, len(self.frames[0])):\n            pixels = [int(frame[x]) for frame in self.frames]\n            result = self._compute_per_pixel(pixels)\n            resulting_image.append(result)\n        return resulting_image\n\n    def printable_image(self, image_data: List[int]):\n        lines = [\n            \"\".join(map(str, image_data[i : i + self.width]))\n            for i in range(0, len(image_data), self.width)\n        ]\n        return \"\\n\".join(lines)\n\n\nclass Day08PartA(Day08, FileReaderSolution):\n    def solve(self, input_data: str) -> int:\n        self.load_image(input_data, width=25, height=6)\n        # Find layer with the fewest 0 digits:\n        layer = self.layer_with_fewest_digit(0)\n        number_ones = self.count_number_per_frame(1, layer)\n        number_twos = self.count_number_per_frame(2, layer)\n        return number_ones * number_twos\n\n\nclass Day08PartB(Day08, FileReaderSolution):\n    def solve(self, input_data: str) -> str:\n        self.load_image(input_data, width=25, height=6)\n        image = self.get_computed_image()\n        printable = self.printable_image(image)\n        result = printable.replace(\"0\", \" \").replace(\"1\", \"■\")\n        return result\n","sub_path":"src/adventofcode2019/solutions/day08.py","file_name":"day08.py","file_ext":"py","file_size_in_byte":2614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} {"seq_id":"224410643","text":"from functools import reduce\r\ndef prime(num):\r\n    if num < 2:\r\n        return False\r\n    for j in range(2, num):\r\n        if num % j == 0:\r\n            return False\r\n    return True\r\n\r\ndef main():\r\n    arr = []\r\n    print(\"Enter number of elements\")\r\n    size = int(input())\r\n\r\n    for i in range(size):\r\n        print(\"Enter element number :\", i + 1)\r\n        no = int(input())\r\n        arr.append(no)\r\n\r\n    print(\"Your entered data is :\", arr)\r\n\r\n    primearr = list(filter(prime, arr))\r\n\r\n    print(\"after filtering data is: \", primearr)\r\n\r\n    modarr = list(map(lambda no: no*2, primearr))\r\n    print(\"After mapping data is: \", modarr)\r\n\r\n    largest = reduce(lambda x, y: x if x > y else y, modarr)\r\n    print(\"After reduce data is: \", largest)\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n","sub_path":"assignment4/assignment4_5.py","file_name":"assignment4_5.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} {"seq_id":"287666971","text":"from gevent import monkey\nmonkey.patch_all()\n\nfrom argparse import ArgumentParser\nimport os\nfrom gsn.bioinfo.python.tools.S3Connect.S3AccessMulti import S3AccessMulti as S3Access\n\ndef main():\n    aparser = ArgumentParser()\n    
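# A minimal sketch of the fixed-size layer slicing used by load_image
# above, with toy dimensions so the result is easy to verify.
image_data = '123456789012'
width, height = 3, 2
frame_size = width * height
frames = [image_data[i:i + frame_size] for i in range(0, len(image_data), frame_size)]
print(frames)   # ['123456', '789012']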
aparser.add_argument(\"--source\", dest=\"source\", help=\"Source path\", default=None, type=str)\n    aparser.add_argument(\"--destination\", dest=\"destination\", help=\"Destination\", default=None, type=str)\n    aparser.add_argument(\"--config\", dest=\"configFile\", help=\"S3 config file.\", default=None, type=str)\n    aparser.add_argument(\"--verbose\", dest=\"verbose\", default=True, type=bool)\n    options = vars(aparser.parse_args())\n\n    s3Access = S3Access(options['configFile'])\n\n    if options['source'].count(':') > 0:\n        bucket = options['source'].split(':')[0]\n        s3Path = options['source'].split(':')[1]\n        localPath = options['destination']\n\n        if not os.path.exists(localPath):\n            os.makedirs(localPath)\n        trans = s3Access.getDir(bucket, s3Path, localPath, force=True, verbose=options['verbose'])\n\n    else:\n        bucket = options['destination'].split(':')[0]\n        s3Path = options['destination'].split(':')[1]\n        localPath = options['source']\n        trans = s3Access.putDir(bucket, localPath, s3Path, force=True, verbose=options['verbose'])\n\n    if not trans['success']:\n        raise Exception('Data transfer failed')\n\nif __name__ == '__main__':\n    main()","sub_path":"S3Interface.py","file_name":"S3Interface.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} {"seq_id":"493562444","text":"print('ARITHMETIC PROGRESSION GENERATOR')\nprint('-=' * 15)\na1 = int(input('First term: '))\nr = int(input('Common difference: '))\ntermo = a1\ncont = 1\nwhile cont <= 10:\n    print('{} -> '.format(termo), end='')\n    termo += r\n    cont += 1\nprint('END')\n\n\n","sub_path":"Mundo - 2/Desafio061.py","file_name":"Desafio061.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} {"seq_id":"173177822","text":"import tensorflow as tf\n\nfrom nalp.corpus import TextCorpus\nfrom nalp.datasets import LanguageModelingDataset\nfrom nalp.encoders import IntegerEncoder\nfrom nalp.models.generators import RNNGenerator\n\n# Creating a character TextCorpus from file\ncorpus = TextCorpus(from_file=\"data/text/chapter1_harry.txt\", corpus_type=\"char\")\n\n# Creating an IntegerEncoder, learning encoding and encoding tokens\nencoder = IntegerEncoder()\nencoder.learn(corpus.vocab_index, corpus.index_vocab)\nencoded_tokens = encoder.encode(corpus.tokens)\n\n# Creating Language Modeling Dataset\ndataset = LanguageModelingDataset(\n    encoded_tokens, max_contiguous_pad_length=10, batch_size=64, shuffle=True\n)\n\n# Creating the RNN\nrnn = RNNGenerator(\n    encoder=encoder, vocab_size=corpus.vocab_size, embedding_size=256, hidden_size=512\n)\n\n# As NALP's RNNs are stateful, we need to build it with a fixed batch size\nrnn.build((64, None))\n\n# Compiling the RNN\nrnn.compile(\n    optimizer=tf.optimizers.Adam(learning_rate=0.001),\n    loss=tf.losses.SparseCategoricalCrossentropy(from_logits=True),\n    metrics=[tf.metrics.SparseCategoricalAccuracy(name=\"accuracy\")],\n)\n\n# Fitting the RNN\nrnn.fit(dataset.batches, epochs=200)\n\n# Evaluating the RNN\n# rnn.evaluate(dataset.batches)\n\n# Saving RNN weights\nrnn.save_weights(\"trained/rnn\", save_format=\"tf\")\n","sub_path":"examples/models/generators/train_rnn.py","file_name":"train_rnn.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} {"seq_id":"550671445","text":"import cv2\nfrom djitellopy import tello\n\ntello = tello.Tello()\ntello.connect()\n
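# A minimal sketch of the "bucket:path" direction test the S3 script
# above relies on, using split(':', 1) so keys containing ':' survive.
# The function name and paths are illustrative.
def parse_spec(spec):
    if ':' in spec:
        bucket, s3_path = spec.split(':', 1)
        return bucket, s3_path
    return None, spec

print(parse_spec('my-bucket:runs/2020/01'))  # ('my-bucket', 'runs/2020/01')
print(parse_spec('/tmp/local/dir'))          # (None, '/tmp/local/dir')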
battery_level = tello.get_battery()\nprint(battery_level)\ntello.streamon()\n\n# cap = cv2.VideoCapture(0)\n# w = cap.get(3)\n# h = cap.get(4)\n# print(\"w = \", w)\n# print(\"h = \", h)\n# ret, img = cap.read()\n\n# load the Haar cascade once, outside the frame loop\nfaceCascade = cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\")\n\nwhile True:\n    img = tello.get_frame_read().frame\n    img = cv2.resize(img, (360, 240))\n    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n    faces = faceCascade.detectMultiScale(imgGray, 1.2, 8)\n\n    for (x, y, w, h) in faces:\n        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)\n        cx = x + w // 2\n        cy = y + h // 2\n        area = w * h\n        cv2.circle(img, (cx, cy), 5, (0, 255, 0), cv2.FILLED)\n        print('area =', area)\n\n    cv2.imshow('frame', img)\n    if cv2.waitKey(1) & 0xFF == ord('q'):\n        break\n\n# stop the drone video stream and close the preview window; the commented\n# webcam capture above never created cap/out objects to release here\ntello.streamoff()\ncv2.destroyAllWindows()\n","sub_path":"Day04/findFaceTello01.py","file_name":"findFaceTello01.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} {"seq_id":"123505762","text":"string1 = '543987'\nstring = '987654321'\n\nresult = [5,[4,3],9,[8,7]]\n\n#print string[0]\n\ndef numbers_in_lists(string):\n    leader = int(string[0]) - 1\n    output = []\n    lowerlist = []\n    for e in range(len(string)):\n        if int(string[e]) > leader:\n            if len(lowerlist) > 0:\n                output.append(lowerlist)\n            #print string[e] + \" is greater than \" + str(leader)\n            leader = int(string[e])\n            #print string[e] + \" is the new leader!!\"\n            output.append(int(string[e]))\n            lowerlist = []\n        else:\n            #print string[e] + \" is less than / equal to \" + str(leader)\n            lowerlist.append(int(string[e]))\n            #print lowerlist\n    if len(lowerlist) > 0:\n        output.append(lowerlist)\n    return output\n\nprint(numbers_in_lists(string))","sub_path":"Lesson3/NumberinList.py","file_name":"NumberinList.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} {"seq_id":"136740177","text":"#!/usr/bin/env python\n# Copyright (c) 2013-2016, Neville-Neil Consulting\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# Neither the name of Neville-Neil Consulting nor the names of its \n# contributors may be used to endorse or promote products derived from \n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. 
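# A minimal sketch of the leaders-and-followers grouping implemented in
# NumberinList.py above, checked against the expected result noted there.
def numbers_in_lists(s):
    out, lower, leader = [], [], -1
    for ch in s:
        d = int(ch)
        if d > leader:
            if lower:
                out.append(lower)
            leader = d
            out.append(d)
            lower = []
        else:
            lower.append(d)
    if lower:
        out.append(lower)
    return out

print(numbers_in_lists('543987'))  # [5, [4, 3], 9, [8, 7]]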
IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n# File: $Id:$\n#\n# Author: Mike Karels\n#\n# Description: A simple re-implementation of the ping6(8) program in\n# Python using the Packet Construction Set\n\nfrom pcs.packets.localhost import *\nfrom pcs.packets.ethernet import *\nfrom pcs.packets.ipv6 import *\nfrom pcs.packets.icmpv6 import *\nfrom pcs.packets.payload import *\nfrom pcs import *\nfrom time import sleep\nimport ipv6ext\n\ndef main():\n\n    from optparse import OptionParser\n\n    parser = OptionParser()\n    parser.add_option(\"-c\", \"--count\",\n                      dest=\"count\", default=1,\n                      help=\"Stop after sending (and receiving) count ECHO_RESPONSE packets.\")\n\n    parser.add_option(\"-D\", \"--dont_fragment\",\n                      dest=\"df\", default=False,\n                      help=\"Set the Don't Fragment bit.\")\n\n    parser.add_option(\"-s\", \"--ip_source\",\n                      dest=\"ip_source\", default=None,\n                      help=\"The IP source address.\")\n\n    parser.add_option(\"-d\", \"--ip_dest\",\n                      dest=\"ip_dest\", default=None,\n                      help=\"The IP destination address.\")\n\n    parser.add_option(\"-I\", \"--ether_iface\",\n                      dest=\"ether_iface\", default=None,\n                      help=\"The name of the source interface.\")\n\n    parser.add_option(\"-e\", \"--ether_source\",\n                      dest=\"ether_source\", default=None,\n                      help=\"The host Ethernet source address.\")\n\n    parser.add_option(\"-g\", \"--ether_dest\",\n                      dest=\"ether_dest\", default=None,\n                      help=\"The gateway Ethernet destination address.\")\n\n    parser.add_option(\"-r\", \"--rt_hop\",\n                      dest=\"hop\", default=\"::\",\n                      help=\"The intermediate router address.\")\n\n    (options, args) = parser.parse_args()\n\n    rtcount = 1\n    # plen is 8 + rtcount * 16 for rt hdr, 8 for ICMP, 6 for foobar\n    plen = 8 + rtcount * 16 + 8 + 6\n    c = ethernet(src=ether_atob(options.ether_source), \\\n        dst=ether_atob(options.ether_dest)) / \\\n        ipv6(hop=64, next_header = 43, length = plen, \\\n        src=inet_pton(AF_INET6, options.ip_source), \\\n        dst=inet_pton(AF_INET6, options.ip_dest)) / \\\n        ipv6ext.rt_ext(next_header = 58, \\\n\t\t       addr1 = inet_pton(AF_INET6, options.hop)) / \\\n        icmpv6(type=ICMP6_ECHO_REQUEST, id=12345) / \\\n        payload(payload=\"foobar\")\n\n    c.calc_lengths()\n\n    #\n    # Increment ICMP echo sequence number with each iteration.\n    #\n    output = PcapConnector(options.ether_iface)\n    ip = c.packets[1]\n    icmpecho = c.packets[3]\n    count = int(options.count)\n    while (count > 0):\n        # c.calc_checksums()\n        # icmpecho.cksum = icmpv6.cksum(icmpecho, ip)\n        # icmpecho.calc_checksum()\n        c.encode()\n\n        out = output.write(c.bytes, len(c.bytes))\n        # packet = input.read()\n        # print packet\n        sleep(1)\n        count -= 1\n        icmpecho.sequence += 1\n\nif __name__ == '__main__':\n    main()\n","sub_path":"scripts/ping6rt.py","file_name":"ping6rt.py","file_ext":"py","file_size_in_byte":4557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} {"seq_id":"356747445","text":"import infrastructure as infra\nimport numpy as np\nimport pickle\n\n'''\n\t@Param \tload \tDefault = 1, determines if we should calculate\n\t\t\t\t\ttraining data or load it from disk.\n\tThis function creates a training dataset.\n'''\n
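# A minimal sketch of the load-or-compute caching pattern train()
# implements below, generalised slightly; the 'cache.pkl' path and the
# builder callable are illustrative assumptions.
import os
import pickle

def cached(path, build):
    # Reuse the pickled result if it exists, otherwise compute and save it.
    if os.path.exists(path):
        with open(path, 'rb') as f:
            return pickle.load(f)
    data = build()
    with open(path, 'wb') as f:
        pickle.dump(data, f)
    return data

freqs = cached('cache.pkl', lambda: {'count': 0})
print(freqs)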
def train(load=1):\n\tif load:\n\t\tdigit_matrices = pickle.load(open('train.data', 'rb'))\n\telse:\n\t\tlabels = open('digitdata/traininglabels', 'r')\n\t\trepresentation = labels.readlines()\n\n\t\timages = open('digitdata/trainingimages', 'r')\n\n\t\tdigit_matrices = infra.digit_list()\n\t\tdigit_matrices.count = len(representation)\n\n\t\tfor line in representation:\n\t\t\tpopulate_category(line, digit_matrices, images)\n\n\t\tlabels.close()\n\t\timages.close()\n\n\t\tpickle.dump(digit_matrices, open('train.data', 'wb'))\n\treturn digit_matrices\n\n'''\n\t@Param\tline\tline from file - this should be an integer representing\n\t\t\t\t\tthe current image's category\n\t\t\tdigit_matrices \tDigit-list, used to store frequency information\n\t\t\timages \tfile of \"images\" from which we want to generate training data\n'''\ndef populate_category(line, digit_matrices, images):\n\tdigit_class = int(line)\n\tlook_at_this = digit_matrices.frequencies[digit_class].matrices\n\tdigit_matrices.frequencies[digit_class].count += 1\n\tfor i in range(28):\n\t\tcur_line = images.readline()\n\t\tfor j in range(28):\n\t\t\tif cur_line[j] != ' ':\n\t\t\t\tlook_at_this[1][i][j] += 1\n\t\t\telse:\n\t\t\t\tlook_at_this[0][i][j] += 1\n\n\nif __name__ == '__main__':\n\ttrain()","sub_path":"read_training.py","file_name":"read_training.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} {"seq_id":"639086300","text":"# File object is passed in here and first pass is made to extract relevant lines\r\nclass CSV:\r\n\r\n    def __init__(self, lRaw=None, antype=None):\r\n        import re\r\n        import statistics\r\n        self.antype = antype  # type of analysis\r\n        self.lRaw = lRaw\r\n        self.lRes = []  # all extracted data\r\n        self.lCnt = None\r\n        self.lStats = []\r\n\r\n        if antype == 'freq':\r\n            self.rgx = re.compile(r'(\\d+).*?(?P
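# The record above is cut off mid-pattern at '(?P', so the real group name
# is unknown; what follows is only a generic named-group example in the
# same spirit (pulling a frequency out of a numbered line), not the
# original pattern.
import re

rgx = re.compile(r'(\d+).*?(?P<freq>\d+\.\d+)\s*Hz')
m = rgx.search('step 12: measured 50.0 Hz')
print(m.group(1), m.group('freq'))  # 12 50.0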